celldetective 1.3.9.post5__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/__init__.py +0 -3
- celldetective/_version.py +1 -1
- celldetective/events.py +2 -4
- celldetective/exceptions.py +11 -0
- celldetective/extra_properties.py +132 -0
- celldetective/filters.py +7 -1
- celldetective/gui/InitWindow.py +37 -46
- celldetective/gui/__init__.py +3 -9
- celldetective/gui/about.py +19 -15
- celldetective/gui/analyze_block.py +34 -19
- celldetective/gui/base_annotator.py +786 -0
- celldetective/gui/base_components.py +23 -0
- celldetective/gui/classifier_widget.py +86 -94
- celldetective/gui/configure_new_exp.py +163 -46
- celldetective/gui/control_panel.py +76 -146
- celldetective/gui/{signal_annotator.py → event_annotator.py} +533 -1438
- celldetective/gui/generic_signal_plot.py +11 -13
- celldetective/gui/gui_utils.py +54 -23
- celldetective/gui/help/neighborhood.json +2 -2
- celldetective/gui/json_readers.py +5 -4
- celldetective/gui/layouts.py +265 -31
- celldetective/gui/{signal_annotator2.py → pair_event_annotator.py} +433 -635
- celldetective/gui/plot_measurements.py +21 -17
- celldetective/gui/plot_signals_ui.py +125 -72
- celldetective/gui/process_block.py +283 -188
- celldetective/gui/processes/compute_neighborhood.py +594 -0
- celldetective/gui/processes/downloader.py +37 -34
- celldetective/gui/processes/measure_cells.py +19 -8
- celldetective/gui/processes/segment_cells.py +47 -11
- celldetective/gui/processes/track_cells.py +18 -13
- celldetective/gui/seg_model_loader.py +21 -62
- celldetective/gui/settings/__init__.py +7 -0
- celldetective/gui/settings/_settings_base.py +70 -0
- celldetective/gui/{retrain_signal_model_options.py → settings/_settings_event_model_training.py} +54 -109
- celldetective/gui/{measurement_options.py → settings/_settings_measurements.py} +54 -92
- celldetective/gui/{neighborhood_options.py → settings/_settings_neighborhood.py} +10 -13
- celldetective/gui/settings/_settings_segmentation.py +49 -0
- celldetective/gui/{retrain_segmentation_model_options.py → settings/_settings_segmentation_model_training.py} +38 -92
- celldetective/gui/{signal_annotator_options.py → settings/_settings_signal_annotator.py} +78 -103
- celldetective/gui/{btrack_options.py → settings/_settings_tracking.py} +85 -116
- celldetective/gui/styles.py +2 -1
- celldetective/gui/survival_ui.py +49 -95
- celldetective/gui/tableUI.py +53 -25
- celldetective/gui/table_ops/__init__.py +0 -0
- celldetective/gui/table_ops/merge_groups.py +118 -0
- celldetective/gui/thresholds_gui.py +617 -1221
- celldetective/gui/viewers.py +107 -42
- celldetective/gui/workers.py +8 -4
- celldetective/io.py +137 -57
- celldetective/links/zenodo.json +145 -144
- celldetective/measure.py +94 -53
- celldetective/neighborhood.py +342 -268
- celldetective/preprocessing.py +56 -35
- celldetective/regionprops/_regionprops.py +16 -5
- celldetective/relative_measurements.py +50 -29
- celldetective/scripts/analyze_signals.py +4 -1
- celldetective/scripts/measure_cells.py +5 -5
- celldetective/scripts/measure_relative.py +20 -12
- celldetective/scripts/segment_cells.py +4 -10
- celldetective/scripts/segment_cells_thresholds.py +3 -3
- celldetective/scripts/track_cells.py +10 -8
- celldetective/scripts/train_segmentation_model.py +18 -6
- celldetective/signals.py +29 -14
- celldetective/tracking.py +14 -3
- celldetective/utils.py +91 -62
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/METADATA +24 -16
- celldetective-1.4.1.dist-info/RECORD +123 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/WHEEL +1 -1
- tests/gui/__init__.py +0 -0
- tests/gui/test_new_project.py +228 -0
- tests/gui/test_project.py +99 -0
- tests/test_preprocessing.py +2 -2
- celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +0 -79
- celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
- celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +0 -37
- celldetective/models/segmentation_effectors/test-transfer/config_input.json +0 -39
- celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
- celldetective/models/signal_detection/NucCond/classification_loss.png +0 -0
- celldetective/models/signal_detection/NucCond/classifier.h5 +0 -0
- celldetective/models/signal_detection/NucCond/config_input.json +0 -1
- celldetective/models/signal_detection/NucCond/log_classifier.csv +0 -126
- celldetective/models/signal_detection/NucCond/log_regressor.csv +0 -282
- celldetective/models/signal_detection/NucCond/regression_loss.png +0 -0
- celldetective/models/signal_detection/NucCond/regressor.h5 +0 -0
- celldetective/models/signal_detection/NucCond/scores.npy +0 -0
- celldetective/models/signal_detection/NucCond/test_confusion_matrix.png +0 -0
- celldetective/models/signal_detection/NucCond/test_regression.png +0 -0
- celldetective/models/signal_detection/NucCond/validation_confusion_matrix.png +0 -0
- celldetective/models/signal_detection/NucCond/validation_regression.png +0 -0
- celldetective-1.3.9.post5.dist-info/RECORD +0 -129
- tests/test_qt.py +0 -103
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/entry_points.txt +0 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info/licenses}/LICENSE +0 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/top_level.txt +0 -0
celldetective/preprocessing.py
CHANGED

@@ -1,12 +1,13 @@
 """
 Copright © 2024 Laboratoire Adhesion et Inflammation, Authored by Remy Torro & Ksenija Dervanova.
 """
+from typing import List

 from tqdm import tqdm
 import numpy as np
 import os
 from celldetective.io import get_config, get_experiment_wells, interpret_wells_and_positions, extract_well_name_and_number, get_positions_in_well, extract_position_name, get_position_movie_path, load_frames, auto_load_number_of_frames
-from celldetective.utils import interpolate_nan, estimate_unreliable_edge, unpad,
+from celldetective.utils import interpolate_nan, estimate_unreliable_edge, unpad, config_section_to_dict, _extract_channel_indices_from_config, _extract_nbr_channels_from_config, _get_img_num_per_channel
 from celldetective.segmentation import filter_image, threshold_image
 from csbdeep.io import save_tiff_imagej_compatible
 from gc import collect
@@ -14,7 +15,7 @@ from lmfit import Parameters, Model
 import tifffile.tifffile as tiff
 from scipy.ndimage import shift

-def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True):
+def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True, offset=None, fix_nan: bool = False):

 """
 Estimate the background for each condition in an experiment.
@@ -71,8 +72,8 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio

 config = get_config(experiment)
 wells = get_experiment_wells(experiment)
-len_movie = float(
-movie_prefix =
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]

 well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, "*")

@@ -149,6 +150,11 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio

 try:
 background = np.nanmedian(frame_mean_per_position,axis=0)
+if offset is not None:
+#print("The offset is applied to background...")
+background -= offset
+if fix_nan:
+background = interpolate_nan(background.copy().astype(float))
 backgrounds.append({"bg": background, "well": well_path})
 print(f"Background successfully computed for well {well_name}...")
 except Exception as e:
@@ -170,11 +176,13 @@ def correct_background_model_free(
 opt_coef_nbr = 100,
 operation = 'divide',
 clip = False,
+offset = None,
 show_progress_per_well = True,
 show_progress_per_pos = False,
 export = False,
 return_stacks = False,
 movie_prefix=None,
+fix_nan=False,
 activation_protocol=[['gauss',2],['std',4]],
 export_prefix='Corrected',
 **kwargs,
@@ -243,9 +251,9 @@ def correct_background_model_free(

 config = get_config(experiment)
 wells = get_experiment_wells(experiment)
-len_movie = float(
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 if movie_prefix is None:
-movie_prefix =
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]

 well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
 channel_indices = _extract_channel_indices_from_config(config, [target_channel])
@@ -259,7 +267,7 @@ def correct_background_model_free(
 well_name, _ = extract_well_name_and_number(well_path)

 try:
-background = estimate_background_per_condition(experiment, threshold_on_std=threshold_on_std, well_option=int(well_indices[k]), target_channel=target_channel, frame_range=frame_range, mode=mode, show_progress_per_pos=True, show_progress_per_well=False, activation_protocol=activation_protocol)
+background = estimate_background_per_condition(experiment, threshold_on_std=threshold_on_std, well_option=int(well_indices[k]), target_channel=target_channel, frame_range=frame_range, mode=mode, show_progress_per_pos=True, show_progress_per_well=False, activation_protocol=activation_protocol, offset=offset, fix_nan=fix_nan)
 background = background[0]
 background = background['bg']
 except Exception as e:
@@ -283,18 +291,20 @@ def correct_background_model_free(

 corrected_stack = apply_background_to_stack(stack_path,
 background,
-target_channel_index=channel_indices[0],
-nbr_channels=nbr_channels,
-stack_length=len_movie,
-threshold_on_std=threshold_on_std,
-optimize_option=optimize_option,
-opt_coef_range=opt_coef_range,
-opt_coef_nbr=opt_coef_nbr,
-operation=operation,
-clip=clip,
-
-
-
+target_channel_index = channel_indices[0],
+nbr_channels = nbr_channels,
+stack_length = len_movie,
+threshold_on_std = threshold_on_std,
+optimize_option = optimize_option,
+opt_coef_range = opt_coef_range,
+opt_coef_nbr = opt_coef_nbr,
+operation = operation,
+clip = clip,
+offset = offset,
+export = export,
+fix_nan=fix_nan,
+activation_protocol = activation_protocol,
+prefix = export_prefix,
 )
 print('Correction successful.')
 if return_stacks:
@@ -310,7 +320,7 @@ def correct_background_model_free(



-def apply_background_to_stack(stack_path, background, target_channel_index=0, nbr_channels=1, stack_length=45,activation_protocol=[['gauss',2],['std',4]], threshold_on_std=1, optimize_option=True, opt_coef_range=(0.95,1.05), opt_coef_nbr=100, operation='divide', clip=False, export=False, prefix="Corrected"):
+def apply_background_to_stack(stack_path, background, target_channel_index=0, nbr_channels=1, stack_length=45, offset = None, activation_protocol=[['gauss',2],['std',4]], threshold_on_std=1, optimize_option=True, opt_coef_range=(0.95,1.05), opt_coef_nbr=100, operation='divide', clip=False, export=False, prefix="Corrected", fix_nan=False):

 """
 Apply background correction to an image stack.
@@ -385,11 +395,14 @@ def apply_background_to_stack(stack_path, background, target_channel_index=0, nb

 frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
 target_img = frames[:,:,target_channel_index].copy()
-
+if offset is not None:
+#print(f"The offset is applied to image...")
+target_img -= offset
+
 if optimize_option:

 target_copy = target_img.copy()
-
+
 std_frame = filter_image(target_copy.copy(),filters=activation_protocol)
 edge = estimate_unreliable_edge(activation_protocol)
 mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge)
@@ -412,7 +425,7 @@ def apply_background_to_stack(stack_path, background, target_channel_index=0, nb
 loss.append(s)

 c = coefficients[np.argmin(loss)]
-print(f"
+print(f"IFD {i}; optimal coefficient: {c}...")
 # if c==min(coefficients) or c==max(coefficients):
 # print('Warning... The optimal coefficient is beyond the range provided... Please adjust your coefficient range...')
 else:
@@ -422,16 +435,20 @@ def apply_background_to_stack(stack_path, background, target_channel_index=0, nb
 correction = np.divide(target_img, background*c, where=background==background)
 correction[background!=background] = np.nan
 correction[target_img!=target_img] = np.nan
-fill_val = 1.0

 elif operation=="subtract":
 correction = np.subtract(target_img, background*c, where=background==background)
 correction[background!=background] = np.nan
 correction[target_img!=target_img] = np.nan
-fill_val = 0.0
 if clip:
 correction[correction<=0.] = 0.
+else:
+print("Operation not supported... Abort.")
+return

+correction[~np.isfinite(correction)] = np.nan
+if fix_nan:
+correction = interpolate_nan(correction.copy())
 frames[:,:,target_channel_index] = correction
 corrected_stack.append(frames)

@@ -763,9 +780,9 @@ def correct_background_model(

 config = get_config(experiment)
 wells = get_experiment_wells(experiment)
-len_movie = float(
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 if movie_prefix is None:
-movie_prefix =
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]

 well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
 channel_indices = _extract_channel_indices_from_config(config, [target_channel])
@@ -785,6 +802,10 @@ def correct_background_model(
 for pidx,pos_path in enumerate(tqdm(selection, disable=not show_progress_per_pos)):

 stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
+if stack_path is None:
+print(f"No stack could be found in {pos_path}... Skip...")
+continue
+
 print(f'Applying the correction to position {extract_position_name(pos_path)}...')
 len_movie_auto = auto_load_number_of_frames(stack_path)
 if len_movie_auto is not None:
@@ -901,7 +922,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
 target_img = frames[:,:,target_channel_index].copy()

-correction = field_correction(target_img,
+correction = field_correction(target_img, threshold=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
 frames[:,:,target_channel_index] = correction.copy()

 if return_stacks:
@@ -922,7 +943,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
 target_img = frames[:,:,target_channel_index].copy()

-correction = field_correction(target_img,
+correction = field_correction(target_img, threshold=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
 frames[:,:,target_channel_index] = correction.copy()

 corrected_stack.append(frames)
@@ -937,7 +958,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 else:
 return None

-def field_correction(img,
+def field_correction(img: np.ndarray, threshold: float = 1, operation: str = 'divide', model: str = 'paraboloid', clip: bool = False, return_bg: bool = False, activation_protocol: List[List] = [['gauss',2],['std',4]]):

 """
 Apply field correction to an image.
@@ -950,8 +971,8 @@ def field_correction(img, threshold_on_std=1, operation='divide', model='parabol
 ----------
 img : numpy.ndarray
 The input image to be corrected.
-
-The threshold value on the
+threshold : float, optional
+The threshold value on the image, post activation protocol for masking out cells (default is 1).
 operation : str, optional
 The operation to apply for background correction, either 'divide' or 'subtract' (default is 'divide').
 model : str, optional
@@ -989,7 +1010,7 @@ def field_correction(img, threshold_on_std=1, operation='divide', model='parabol

 std_frame = filter_image(target_copy,filters=activation_protocol)
 edge = estimate_unreliable_edge(activation_protocol)
-mask = threshold_image(std_frame,
+mask = threshold_image(std_frame, threshold, np.inf, foreground_value=1, edge_exclusion=edge).astype(int)
 background = fit_background_model(img, cell_masks=mask, model=model, edge_exclusion=edge)

 if operation=="divide":
@@ -1078,9 +1099,9 @@ def correct_channel_offset(

 config = get_config(experiment)
 wells = get_experiment_wells(experiment)
-len_movie = float(
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 if movie_prefix is None:
-movie_prefix =
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]

 well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
 channel_indices = _extract_channel_indices_from_config(config, [target_channel])
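The new offset and fix_nan keyword arguments thread through the whole background-correction path (estimate_background_per_condition, correct_background_model_free, apply_background_to_stack): the offset is subtracted from both the image and the estimated background, and fix_nan interpolates the NaN pixels left by the correction. A minimal usage sketch against the full signature shown above; the experiment path and channel name are placeholders, not values from the diff:

from celldetective.preprocessing import estimate_background_per_condition

backgrounds = estimate_background_per_condition(
    "/path/to/experiment/",          # placeholder experiment folder
    threshold_on_std=1,
    well_option='*',
    target_channel="channel_name",   # placeholder channel
    frame_range=[0, 5],
    offset=100,                      # new in 1.4.1: constant offset removed before correction
    fix_nan=True,                    # new in 1.4.1: interpolate NaN pixels in the background
)
for item in backgrounds:
    print(item["well"], item["bg"].shape)  # one background image per well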
celldetective/regionprops/_regionprops.py
CHANGED

@@ -59,6 +59,22 @@ COL_DTYPES = {
 OBJECT_COLUMNS = [col for col, dtype in COL_DTYPES.items() if dtype == object]
 PROP_VALS = set(PROPS.values())

+_require_intensity_image = (
+'image_intensity',
+'intensity_max',
+'intensity_mean',
+'intensity_median',
+'intensity_min',
+'intensity_std',
+'moments_weighted',
+'moments_weighted_central',
+'centroid_weighted',
+'centroid_weighted_local',
+'moments_weighted_hu',
+'moments_weighted_normalized',
+)
+
+
 class CustomRegionProps(RegionProperties):

 """
@@ -79,11 +95,6 @@ class CustomRegionProps(RegionProperties):
 assert len(self.channel_names)==self._intensity_image.shape[-1],'Mismatch between provided channel names and the number of channels in the image...'

 if attr == "__setstate__":
-# When deserializing this object with pickle, `__setstate__`
-# is accessed before any other attributes like `self._intensity_image`
-# are available which leads to a RecursionError when trying to
-# access them later on in this function. So guard against this by
-# provoking the default AttributeError (gh-6465).
 return self.__getattribute__(attr)

 if self._intensity_image is None and attr in _require_intensity_image:
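The added _require_intensity_image tuple names the region properties that only exist when an intensity image is supplied; the unchanged guard in the context above (if self._intensity_image is None and attr in _require_intensity_image:) rejects them early instead of failing deeper in skimage. A toy sketch of that guard pattern, not the celldetective class:

import numpy as np

_require_intensity_image = ('intensity_mean', 'intensity_max', 'centroid_weighted')

class RegionPropsSketch:
    # Minimal stand-in to illustrate the guard; the real logic lives in CustomRegionProps.
    def __init__(self, label_image, intensity_image=None):
        self._label_image = label_image
        self._intensity_image = intensity_image

    def __getattr__(self, attr):
        if self._intensity_image is None and attr in _require_intensity_image:
            raise AttributeError(f"'{attr}' requires an intensity image")
        raise AttributeError(attr)

props = RegionPropsSketch(np.zeros((8, 8), dtype=int))
try:
    props.intensity_mean
except AttributeError as err:
    print(err)  # 'intensity_mean' requires an intensity image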
celldetective/relative_measurements.py
CHANGED

@@ -396,7 +396,7 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
 return df_pairs

 except KeyError:
-print(f"Neighborhood
+print(f"Neighborhood not found in data frame. Measurements for this neighborhood will not be calculated")


 def timeline_matching(timeline1, timeline2):
@@ -531,7 +531,7 @@ def update_effector_table(df_relative, df_effector):
 df_effector.loc[df_effector['ID'] == effector, 'group_neighborhood'] = 0
 return df_effector

-def extract_neighborhoods_from_pickles(pos):
+def extract_neighborhoods_from_pickles(pos, populations=['targets','effectors']):

 """
 Extract neighborhood protocols from pickle files located at a given position.
@@ -573,29 +573,40 @@ def extract_neighborhoods_from_pickles(pos):

 """

-
-
-
-
-
-
-
-
-
-
-
-
-if
-
-
-
-
-
-
-
-
-
+neighborhood_protocols = []
+
+for pop in populations:
+tab_pop = pos + os.sep.join(['output', 'tables', f'trajectories_{pop}.pkl'])
+if os.path.exists(tab_pop):
+df_pop = np.load(tab_pop, allow_pickle=True)
+for column in list(df_pop.columns):
+if column.startswith('neighborhood'):
+neigh_protocol = extract_neighborhood_settings(column, population=pop)
+neighborhood_protocols.append(neigh_protocol)
+
+# tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
+# if os.path.exists(tab_tc):
+# df_targets = np.load(tab_tc, allow_pickle=True)
+# else:
+# df_targets = None
+# if os.path.exists(tab_tc.replace('targets','effectors')):
+# df_effectors = np.load(tab_tc.replace('targets','effectors'), allow_pickle=True)
+# else:
+# df_effectors = None
+
+# neighborhood_protocols=[]
+
+# if df_targets is not None:
+# for column in list(df_targets.columns):
+# if column.startswith('neighborhood'):
+# neigh_protocol = extract_neighborhood_settings(column, population='targets')
+# neighborhood_protocols.append(neigh_protocol)
+
+# if df_effectors is not None:
+# for column in list(df_effectors.columns):
+# if column.startswith('neighborhood'):
+# neigh_protocol = extract_neighborhood_settings(column, population='effectors')
+# neighborhood_protocols.append(neigh_protocol)

 return neighborhood_protocols

@@ -646,10 +657,18 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
 """

 assert neigh_string.startswith('neighborhood')
-
-
-
-
+print(f"{neigh_string=}")
+
+if '_(' in neigh_string and ')_' in neigh_string:
+# determine neigh pop from string
+neighbor_population = neigh_string.split('_(')[-1].split(')_')[0].split('-')[-1]
+print(f'{neighbor_population=}')
+else:
+# old method
+if population=='targets':
+neighbor_population = 'effectors'
+elif population=='effectors':
+neighbor_population = 'targets'

 if 'self' in neigh_string:

@@ -716,6 +735,8 @@ def expand_pair_table(data):
 assert 'reference_population' in list(data.columns),"Please provide a valid pair table..."
 assert 'neighbor_population' in list(data.columns),"Please provide a valid pair table..."

+data.__dict__.update(data.astype({'reference_population': str, 'neighbor_population': str}).__dict__)
+
 expanded_table = []

 for neigh, group in data.groupby(['reference_population','neighbor_population']):
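extract_neighborhood_settings now reads the neighbor population directly from the column name when it is encoded between "_(" and ")_", and only falls back to the old targets/effectors convention otherwise. A worked example of that parsing on a hypothetical column name (the name below is illustrative, not taken from a real table):

population = 'targets'  # reference population, as passed to extract_neighborhood_settings
neigh_string = 'neighborhood_(targets-effectors)_circle_60_px'  # hypothetical column name

if '_(' in neigh_string and ')_' in neigh_string:
    # new method: the pair is encoded in the column name itself
    neighbor_population = neigh_string.split('_(')[-1].split(')_')[0].split('-')[-1]
else:
    # old convention: the neighbor population is the other of the two default populations
    neighbor_population = 'effectors' if population == 'targets' else 'targets'

print(neighbor_population)  # effectors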
celldetective/scripts/analyze_signals.py
CHANGED

@@ -15,7 +15,7 @@ parser = argparse.ArgumentParser(description="Classify and regress the signals b
 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 parser.add_argument('-p',"--position", required=True, help="Path to the position")
 parser.add_argument('-m',"--model", required=True, help="Path to the model")
-parser.add_argument("--mode", default="target",
+parser.add_argument("--mode", default="target", help="Cell population of interest")
 parser.add_argument("--use_gpu", default="True", choices=["True","False"],help="use GPU")

 args = parser.parse_args()
@@ -36,6 +36,9 @@ if mode.lower()=="target" or mode.lower()=="targets":

 elif mode.lower()=="effector" or mode.lower()=="effectors":
 table_name = "trajectories_effectors.csv"
+else:
+table_name = f"trajectories_{mode}.csv"
+

 # Load trajectories, add centroid if not in trajectory
 trajectories = pos+os.sep.join(['output','tables', table_name])
celldetective/scripts/measure_cells.py
CHANGED

@@ -6,7 +6,7 @@ import argparse
 import os
 import json
 from celldetective.io import auto_load_number_of_frames, load_frames, fix_missing_labels, locate_labels, extract_position_name
-from celldetective.utils import extract_experiment_channels,
+from celldetective.utils import extract_experiment_channels, config_section_to_dict, _get_img_num_per_channel, extract_experiment_channels
 from celldetective.utils import _remove_invalid_cols, remove_redundant_features, remove_trajectory_measurements, _extract_coordinates_from_features
 from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity, center_of_mass_to_abs_coordinates, measure_radial_distance_to_center
 from pathlib import Path, PurePath
@@ -55,10 +55,10 @@ print("Configuration file: ",config)
 print(f"Population: {mode}...")

 # from exp config fetch spatial calib, channel names
-movie_prefix =
-spatial_calibration = float(
-time_calibration = float(
-len_movie = float(
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
+spatial_calibration = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
+time_calibration = float(config_section_to_dict(config, "MovieSettings")["frametomin"])
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 channel_names, channel_indices = extract_experiment_channels(expfolder)
 nbr_channels = len(channel_names)

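This script, like the others below, now reads the MovieSettings values through config_section_to_dict instead of the previous inline parsing. A sketch of what such a helper amounts to, assuming the experiment configuration is an INI-style file read with configparser; the real implementation lives in celldetective.utils and may differ:

from configparser import ConfigParser

def config_section_to_dict_sketch(config_path, section):
    # Hypothetical stand-in for celldetective.utils.config_section_to_dict:
    # read one INI section into a plain dict of strings.
    parser = ConfigParser()
    parser.read(config_path)
    return dict(parser.items(section))

# settings = config_section_to_dict_sketch("config.ini", "MovieSettings")
# movie_prefix = settings["movie_prefix"]
# spatial_calibration = float(settings["pxtoum"])
# len_movie = float(settings["len_movie"])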
celldetective/scripts/measure_relative.py
CHANGED

@@ -1,7 +1,8 @@
 import argparse
 import os
 from celldetective.relative_measurements import measure_pair_signals_at_position, extract_neighborhoods_from_pickles
-from celldetective.utils import
+from celldetective.utils import config_section_to_dict, extract_experiment_channels
+from celldetective.io import get_experiment_populations

 from pathlib import Path, PurePath

@@ -30,13 +31,15 @@ assert os.path.exists(config), 'The configuration file for the experiment could
 print("Configuration file: ", config)

 # from exp config fetch spatial calib, channel names
-movie_prefix =
-spatial_calibration = float(
-time_calibration = float(
-len_movie = float(
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
+spatial_calibration = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
+time_calibration = float(config_section_to_dict(config, "MovieSettings")["frametomin"])
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 channel_names, channel_indices = extract_experiment_channels(expfolder)
 nbr_channels = len(channel_names)

+populations = get_experiment_populations(expfolder, dtype=str)
+
 # from tracking instructions, fetch btrack config, features, haralick, clean_traj, idea: fetch custom timeline?
 instr_path = PurePath(expfolder, Path(f"{instruction_file}"))
 previous_pair_table_path = pos + os.sep.join(['output', 'tables', 'trajectories_pairs.csv'])
@@ -46,7 +49,7 @@ previous_neighborhoods = []
 associated_reference_population = []


-neighborhoods_to_measure = extract_neighborhoods_from_pickles(pos)
+neighborhoods_to_measure = extract_neighborhoods_from_pickles(pos, populations=populations)
 all_df_pairs = []
 if os.path.exists(previous_pair_table_path):
 df_0 = pd.read_csv(previous_pair_table_path)
@@ -76,9 +79,14 @@ if len(all_df_pairs)>1:
 df_pairs = pd.merge(df_pairs.round(decimals=6), all_df_pairs[i].round(decimals=6), how="outer", on=cols)
 elif len(all_df_pairs)==1:
 df_pairs = all_df_pairs[0]
-
-
-
-
-
-
+else:
+df_pairs = None
+print('No dataframe could be computed for the pairs...')
+
+if df_pairs is not None:
+print('Writing table...')
+if "reference_population" in list(df_pairs.columns) and "neighbor_population" in list(df_pairs.columns):
+df_pairs = df_pairs.sort_values(by=['reference_population', 'neighbor_population', 'REFERENCE_ID', 'NEIGHBOR_ID', 'FRAME'])
+df_pairs.to_csv(previous_pair_table_path, index=False)
+print('Done.')
+
celldetective/scripts/segment_cells.py
CHANGED

@@ -7,7 +7,7 @@ import datetime
 import os
 import json
 from celldetective.io import locate_segmentation_model, auto_load_number_of_frames, extract_position_name, _load_frames_to_segment, _check_label_dims
-from celldetective.utils import _prep_stardist_model, _prep_cellpose_model, _rescale_labels, _segment_image_with_stardist_model,_segment_image_with_cellpose_model,_get_normalize_kwargs_from_config, _estimate_scale_factor, _extract_channel_indices_from_config,
+from celldetective.utils import _prep_stardist_model, _prep_cellpose_model, _rescale_labels, _segment_image_with_stardist_model,_segment_image_with_cellpose_model,_get_normalize_kwargs_from_config, _estimate_scale_factor, _extract_channel_indices_from_config, config_section_to_dict, _extract_nbr_channels_from_config, _get_img_num_per_channel
 from pathlib import Path, PurePath
 from glob import glob
 from shutil import rmtree
@@ -91,9 +91,9 @@ normalize_kwargs = _get_normalize_kwargs_from_config(input_config)

 model_type = input_config['model_type']

-movie_prefix =
-spatial_calibration = float(
-len_movie = float(
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
+spatial_calibration = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])

 # Try to find the file
 try:
@@ -183,12 +183,6 @@ with concurrent.futures.ThreadPoolExecutor() as executor:
 print("Exception: ", e)

 print('Done.')
-
-try:
-del model
-except:
-pass
-
 gc.collect()


celldetective/scripts/segment_cells_thresholds.py
CHANGED

@@ -7,7 +7,7 @@ import os
 import json
 from celldetective.io import auto_load_number_of_frames, load_frames, extract_position_name
 from celldetective.segmentation import segment_frame_from_thresholds
-from celldetective.utils import _extract_channel_indices_from_config,
+from celldetective.utils import _extract_channel_indices_from_config, config_section_to_dict, _extract_nbr_channels_from_config, _get_img_num_per_channel, extract_experiment_channels
 from pathlib import Path, PurePath
 from glob import glob
 from shutil import rmtree
@@ -70,8 +70,8 @@ print(f'Required channels: {required_channels} located at channel indices {chann

 threshold_instructions.update({'target_channel': channel_indices[0]})

-movie_prefix =
-len_movie = float(
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
 channel_names, channel_indices = extract_experiment_channels(expfolder)
 threshold_instructions.update({'channel_names': channel_names})

celldetective/scripts/track_cells.py
CHANGED

@@ -6,8 +6,10 @@ import argparse
 import datetime
 import os
 import json
-from celldetective.io import auto_load_number_of_frames, interpret_tracking_configuration,
-
+from celldetective.io import _load_frames_to_measure, auto_load_number_of_frames, interpret_tracking_configuration, \
+extract_position_name, \
+locate_labels
+from celldetective.utils import _mask_intensity_measurements, extract_experiment_channels, config_section_to_dict, _get_img_num_per_channel, extract_experiment_channels
 from celldetective.measure import drop_tonal_features, measure_features
 from celldetective.tracking import track
 from pathlib import Path, PurePath
@@ -65,12 +67,12 @@ print("Configuration file: ",config)
 print(f"Population: {mode}...")

 # from exp config fetch spatial calib, channel names
-movie_prefix =
-spatial_calibration = float(
-time_calibration = float(
-len_movie = float(
-shape_x = int(
-shape_y = int(
+movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
+spatial_calibration = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
+time_calibration = float(config_section_to_dict(config, "MovieSettings")["frametomin"])
+len_movie = float(config_section_to_dict(config, "MovieSettings")["len_movie"])
+shape_x = int(config_section_to_dict(config, "MovieSettings")["shape_x"])
+shape_y = int(config_section_to_dict(config, "MovieSettings")["shape_y"])

 channel_names, channel_indices = extract_experiment_channels(expfolder)
 nbr_channels = len(channel_names)
celldetective/scripts/train_segmentation_model.py
CHANGED

@@ -124,15 +124,18 @@ if model_type=='cellpose':
 device = torch.device("cpu")
 else:
 print('Using GPU for training...')
-
+
+diam_mean = 30.0
 logger, log_file = logger_setup()
 print(f'Pretrained model: ',pretrained)
 if pretrained is not None:
+if pretrained.endswith('CP_nuclei'):
+diam_mean = 17.0
 pretrained_path = os.sep.join([pretrained,os.path.split(pretrained)[-1]])
 else:
 pretrained_path = pretrained

-model = CellposeModel(gpu=use_gpu, model_type=None, pretrained_model=pretrained_path, diam_mean=
+model = CellposeModel(gpu=use_gpu, model_type=None, pretrained_model=pretrained_path, diam_mean=diam_mean, nchan=X_aug[0].shape[0],)
 model.train(train_data=X_aug, train_labels=Y_aug, normalize=False, channels=None, batch_size=batch_size,
 min_train_masks=1,save_path=target_directory+os.sep+model_name,n_epochs=epochs, model_name=model_name, learning_rate=learning_rate, test_data = X_val, test_labels=Y_val)

@@ -152,7 +155,7 @@ if model_type=='cellpose':
 config_inputs = {"channels": target_channels, "diameter": standard_diameter, 'cellprob_threshold': 0., 'flow_threshold': 0.4,
 'normalization_percentile': normalization_percentile, 'normalization_clip': normalization_clip,
 'normalization_values': normalization_values, 'model_type': 'cellpose',
-'spatial_calibration': input_spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
+'spatial_calibration': input_spatial_calibration, 'cell_size_um': round(diameter*input_spatial_calibration,4), 'dataset': {'train': files_train, 'validation': files_val}}
 json_input_config = json.dumps(config_inputs, indent=4)
 with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
 outfile.write(json_input_config)
@@ -227,9 +230,18 @@ elif model_type=='stardist':

 config_inputs = {"channels": target_channels, 'normalization_percentile': normalization_percentile,
 'normalization_clip': normalization_clip, 'normalization_values': normalization_values,
-'model_type': 'stardist', 'spatial_calibration': spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
-
-
+'model_type': 'stardist', 'spatial_calibration': spatial_calibration,'cell_size_um': median_size * spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
+
+def make_json_safe(obj):
+if isinstance(obj, np.ndarray):
+return obj.tolist()  # convert to list
+if isinstance(obj, (np.int64, np.int32)):
+return int(obj)
+if isinstance(obj, (np.float32, np.float64)):
+return float(obj)
+return str(obj)  # fallback
+
+json_input_config = json.dumps(config_inputs, indent=4, default=make_json_safe)
 with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
 outfile.write(json_input_config)

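The stardist branch now serializes its training config through the small make_json_safe fallback, because numpy arrays and most numpy scalar types are rejected by json.dumps. A quick standalone check of the same hook; the dictionary values below are illustrative:

import json
import numpy as np

def make_json_safe(obj):
    # Same conversions as in the diff above: arrays to lists, numpy scalars to Python numbers.
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, (np.int64, np.int32)):
        return int(obj)
    if isinstance(obj, (np.float32, np.float64)):
        return float(obj)
    return str(obj)

config_inputs = {"channels": np.array([0, 1]), "cell_size_um": np.float32(9.6), "n_train": np.int64(120)}
print(json.dumps(config_inputs, indent=4, default=make_json_safe))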