celldetective 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/__main__.py +12 -5
- celldetective/events.py +28 -2
- celldetective/gui/about.py +0 -1
- celldetective/gui/analyze_block.py +3 -18
- celldetective/gui/btrack_options.py +126 -21
- celldetective/gui/classifier_widget.py +68 -107
- celldetective/gui/configure_new_exp.py +37 -4
- celldetective/gui/control_panel.py +14 -30
- celldetective/gui/generic_signal_plot.py +793 -0
- celldetective/gui/gui_utils.py +401 -226
- celldetective/gui/json_readers.py +0 -2
- celldetective/gui/layouts.py +269 -25
- celldetective/gui/measurement_options.py +14 -23
- celldetective/gui/neighborhood_options.py +6 -16
- celldetective/gui/plot_measurements.py +10 -23
- celldetective/gui/plot_signals_ui.py +53 -687
- celldetective/gui/process_block.py +320 -186
- celldetective/gui/retrain_segmentation_model_options.py +30 -47
- celldetective/gui/retrain_signal_model_options.py +5 -14
- celldetective/gui/seg_model_loader.py +129 -113
- celldetective/gui/signal_annotator.py +93 -103
- celldetective/gui/signal_annotator2.py +9 -13
- celldetective/gui/styles.py +32 -0
- celldetective/gui/survival_ui.py +49 -712
- celldetective/gui/tableUI.py +4 -39
- celldetective/gui/thresholds_gui.py +38 -11
- celldetective/gui/viewers.py +6 -7
- celldetective/io.py +62 -84
- celldetective/measure.py +374 -15
- celldetective/models/segmentation_effectors/ricm-bimodal/config_input.json +130 -0
- celldetective/models/segmentation_effectors/ricm-bimodal/ricm-bimodal +0 -0
- celldetective/models/segmentation_effectors/ricm-bimodal/training_instructions.json +37 -0
- celldetective/neighborhood.py +3 -7
- celldetective/preprocessing.py +2 -4
- celldetective/relative_measurements.py +0 -3
- celldetective/scripts/analyze_signals.py +0 -1
- celldetective/scripts/measure_cells.py +1 -3
- celldetective/scripts/measure_relative.py +1 -2
- celldetective/scripts/segment_cells.py +16 -12
- celldetective/scripts/segment_cells_thresholds.py +17 -10
- celldetective/scripts/track_cells.py +18 -18
- celldetective/scripts/train_segmentation_model.py +1 -2
- celldetective/scripts/train_signal_model.py +0 -3
- celldetective/segmentation.py +1 -1
- celldetective/signals.py +20 -8
- celldetective/tracking.py +2 -1
- celldetective/utils.py +126 -18
- {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/METADATA +19 -12
- celldetective-1.2.2.dist-info/RECORD +92 -0
- {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/WHEEL +1 -1
- celldetective-1.2.0.dist-info/RECORD +0 -88
- {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/LICENSE +0 -0
- {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/entry_points.txt +0 -0
- {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/top_level.txt +0 -0
celldetective/scripts/measure_cells.py
CHANGED

@@ -6,16 +6,14 @@ import argparse
 import os
 import json
 from celldetective.io import auto_load_number_of_frames, load_frames
-from celldetective.utils import extract_experiment_channels, …
+from celldetective.utils import extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, extract_experiment_channels
 from celldetective.utils import remove_redundant_features, remove_trajectory_measurements
 from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity
 from pathlib import Path, PurePath
 from glob import glob
-from shutil import rmtree
 from tqdm import tqdm
 import numpy as np
 import pandas as pd
-import gc
 from natsort import natsorted
 from art import tprint
 from tifffile import imread
celldetective/scripts/measure_relative.py
CHANGED

@@ -1,7 +1,6 @@
 import argparse
 import os
-import …
-from celldetective.relative_measurements import measure_pair_signals_at_position, update_effector_table, extract_neighborhoods_from_pickles
+from celldetective.relative_measurements import measure_pair_signals_at_position, extract_neighborhoods_from_pickles
 from celldetective.utils import ConfigSectionMap, extract_experiment_channels
 
 from pathlib import Path, PurePath
celldetective/scripts/segment_cells.py
CHANGED

@@ -9,7 +9,7 @@ import json
 from stardist.models import StarDist2D
 from cellpose.models import CellposeModel
 from celldetective.io import locate_segmentation_model, auto_load_number_of_frames, load_frames
-from celldetective.utils import interpolate_nan, _estimate_scale_factor, _extract_channel_indices_from_config, …
+from celldetective.utils import interpolate_nan, _estimate_scale_factor, _extract_channel_indices_from_config, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel
 from pathlib import Path, PurePath
 from glob import glob
 from shutil import rmtree
@@ -20,10 +20,7 @@ from csbdeep.io import save_tiff_imagej_compatible
 import gc
 from art import tprint
 from scipy.ndimage import zoom
-import threading
 
-import matplotlib.pyplot as plt
-import time
 
 tprint("Segment")
 
@@ -210,17 +207,24 @@ def segment_index(indices):
 del Y_pred;
 gc.collect()
 
+
+import concurrent.futures
+
 # Multithreading
 indices = list(range(img_num_channels.shape[1]))
 chunks = np.array_split(indices, n_threads)
-… (8 removed lines not shown in the diff view)
+
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    executor.map(segment_index, chunks)
+
+# threads = []
+# for i in range(n_threads):
+# thread_i = threading.Thread(target=segment_index, args=[chunks[i]])
+# threads.append(thread_i)
+# for th in threads:
+# th.start()
+# for th in threads:
+# th.join()
 
 print('Done.')
 
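Both segmentation scripts (and the tracking script further down) now replace the hand-rolled `threading.Thread` loop, kept in the new code only as comments, with `concurrent.futures.ThreadPoolExecutor`. A minimal, self-contained sketch of the same pattern, using a hypothetical `process(indices)` worker in place of the real `segment_index`:

``` python
import numpy as np
import concurrent.futures

def process(indices):
    # stand-in for the per-chunk worker (segment_index / measure_index in the real scripts)
    for i in indices:
        print(f"processing frame {i}")

n_threads = 4
indices = list(range(12))                      # frame indices to process
chunks = np.array_split(indices, n_threads)    # one chunk of frames per worker

# map() replaces the manual Thread()/start()/join() bookkeeping;
# leaving the with-block waits for all submitted chunks to finish
with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
    results = list(executor.map(process, chunks))  # consuming the iterator also surfaces worker exceptions
```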
celldetective/scripts/segment_cells_thresholds.py
CHANGED

@@ -7,7 +7,7 @@ import os
 import json
 from celldetective.io import auto_load_number_of_frames, load_frames
 from celldetective.segmentation import segment_frame_from_thresholds
-from celldetective.utils import …
+from celldetective.utils import _extract_channel_indices_from_config, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, extract_experiment_channels
 from pathlib import Path, PurePath
 from glob import glob
 from shutil import rmtree
@@ -16,7 +16,6 @@ import numpy as np
 from csbdeep.io import save_tiff_imagej_compatible
 import gc
 from art import tprint
-import threading
 
 tprint("Segment")
 
@@ -120,17 +119,25 @@ def segment_index(indices):
 del mask;
 gc.collect()
 
+import concurrent.futures
+
 # Multithreading
 indices = list(range(img_num_channels.shape[1]))
 chunks = np.array_split(indices, n_threads)
-… (8 removed lines not shown in the diff view)
+
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    executor.map(segment_index, chunks)
+
+# indices = list(range(img_num_channels.shape[1]))
+# chunks = np.array_split(indices, n_threads)
+# threads = []
+# for i in range(n_threads):
+# thread_i = threading.Thread(target=segment_index, args=[chunks[i]])
+# threads.append(thread_i)
+# for th in threads:
+# th.start()
+# for th in threads:
+# th.join()
 
 print('Done.')
 gc.collect()
celldetective/scripts/track_cells.py
CHANGED

@@ -7,12 +7,11 @@ import datetime
 import os
 import json
 from celldetective.io import auto_load_number_of_frames, load_frames, interpret_tracking_configuration
-from celldetective.utils import extract_experiment_channels, …
+from celldetective.utils import extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, extract_experiment_channels
 from celldetective.measure import drop_tonal_features, measure_features
 from celldetective.tracking import track
 from pathlib import Path, PurePath
 from glob import glob
-from shutil import rmtree
 from tqdm import tqdm
 import numpy as np
 import pandas as pd
@@ -21,7 +20,6 @@ import os
 from natsort import natsorted
 from art import tprint
 from tifffile import imread
-import threading
 
 tprint("Track")
 
@@ -60,7 +58,6 @@ parent1 = Path(pos).parent
 expfolder = parent1.parent
 config = PurePath(expfolder,Path("config.ini"))
 assert os.path.exists(config),'The configuration file for the experiment could not be located. Abort.'
-print("Configuration file: ",config)
 
 # from exp config fetch spatial calib, channel names
 movie_prefix = ConfigSectionMap(config,"MovieSettings")["movie_prefix"]
@@ -76,10 +73,9 @@ nbr_channels = len(channel_names)
 # from tracking instructions, fetch btrack config, features, haralick, clean_traj, idea: fetch custom timeline?
 instr_path = PurePath(expfolder,Path(f"{instruction_file}"))
 if os.path.exists(instr_path):
-print(f"Tracking instructions for the {mode} population …
+print(f"Tracking instructions for the {mode} population have been successfully loaded...")
 with open(instr_path, 'r') as f:
 instructions = json.load(f)
-print("Reading the following instructions: ",instructions)
 btrack_config = interpret_tracking_configuration(instructions['btrack_config_path'])
 
 if 'features' in instructions:
@@ -102,7 +98,7 @@ if os.path.exists(instr_path):
 else:
 post_processing_options = None
 else:
-print('…
+print('Tracking instructions could not be located... Using a standard bTrack motion model instead...')
 btrack_config = interpret_tracking_configuration(None)
 features = None
 mask_channels = None
@@ -117,7 +113,7 @@ label_path = natsorted(glob(pos+f"{label_folder}"+os.sep+"*.tif"))
 if len(label_path)>0:
 print(f"Found {len(label_path)} segmented frames...")
 else:
-print(f"No segmented frames have been found. Please run segmentation first …
+print(f"No segmented frames have been found. Please run segmentation first. Abort...")
 os.abort()
 
 # Do this if features or Haralick is not None, else don't need stack
@@ -168,14 +164,20 @@ def measure_index(indices):
 # Multithreading
 indices = list(range(img_num_channels.shape[1]))
 chunks = np.array_split(indices, n_threads)
-… (8 removed lines not shown in the diff view)
+
+import concurrent.futures
+
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    executor.map(measure_index, chunks)
+
+# threads = []
+# for i in range(n_threads):
+# thread_i = threading.Thread(target=measure_index, args=[chunks[i]])
+# threads.append(thread_i)
+# for th in threads:
+# th.start()
+# for th in threads:
+# th.join()
 
 df = pd.concat(timestep_dataframes)
 df.reset_index(inplace=True, drop=True)
@@ -202,8 +204,6 @@ trajectories, napari_data = track(None,
 clean_trajectories_kwargs=post_processing_options,
 volume=(shape_x, shape_y),
 )
-print(trajectories)
-print(trajectories.columns)
 
 # out trajectory table, create POSITION_X_um, POSITION_Y_um, TIME_min (new ones)
 # Save napari data
celldetective/scripts/train_segmentation_model.py
CHANGED

@@ -11,11 +11,10 @@ from tqdm import tqdm
 import numpy as np
 import random
 
-from celldetective.utils import load_image_dataset, …
+from celldetective.utils import load_image_dataset, augmenter, interpolate_nan
 from celldetective.io import normalize_multichannel
 from stardist import fill_label_holes
 from art import tprint
-import matplotlib.pyplot as plt
 from distutils.dir_util import copy_tree
 from csbdeep.utils import save_json
 
celldetective/scripts/train_signal_model.py
CHANGED

@@ -5,11 +5,8 @@ Copright © 2023 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.
 import argparse
 import os
 import json
-from pathlib import Path, PurePath
 from glob import glob
-from tqdm import tqdm
 import numpy as np
-import gc
 from art import tprint
 from celldetective.signals import SignalDetectionModel
 from celldetective.io import locate_signal_model
celldetective/segmentation.py
CHANGED

@@ -3,7 +3,7 @@ Segmentation module
 """
 import json
 import os
-from .io import locate_segmentation_model, …
+from .io import locate_segmentation_model, normalize_multichannel
 from .utils import _estimate_scale_factor, _extract_channel_indices
 from pathlib import Path
 from tqdm import tqdm
celldetective/signals.py
CHANGED

@@ -9,7 +9,7 @@ from tensorflow.keras.losses import CategoricalCrossentropy, MeanSquaredError, M…
 from tensorflow.keras.metrics import Precision, Recall, MeanIoU
 from tensorflow.keras.models import load_model,clone_model
 from tensorflow.config.experimental import list_physical_devices, set_memory_growth
-from tensorflow.keras.utils import to_categorical
+from tensorflow.keras.utils import to_categorical
 from tensorflow.keras import Input, Model
 from tensorflow.keras.layers import Conv1D, BatchNormalization, Dense, Activation, Add, MaxPooling1D, Dropout, GlobalAveragePooling1D, Concatenate, ZeroPadding1D, Flatten
 from tensorflow.keras.callbacks import Callback
@@ -18,13 +18,12 @@ from sklearn.metrics import jaccard_score, balanced_accuracy_score, precision_sc…
 from scipy.interpolate import interp1d
 from scipy.ndimage import shift
 
-from celldetective.io import …
+from celldetective.io import locate_signal_model, get_position_pickle, get_position_table
 from celldetective.tracking import clean_trajectories, interpolate_nan_properties
 from celldetective.utils import regression_plot, train_test_split, compute_weights
 import matplotlib.pyplot as plt
 from natsort import natsorted
 from glob import glob
-import shutil
 import random
 from celldetective.utils import color_from_status, color_from_class
 from math import floor, ceil
@@ -155,6 +154,7 @@ def analyze_signals(trajectories, model, interpolate_na=True, …
 f = open(model_config_path)
 config = json.load(f)
 required_signals = config["channels"]
+model_signal_length = config['model_signal_length']
 
 try:
 label = config['label']
@@ -190,6 +190,8 @@ def analyze_signals(trajectories, model, interpolate_na=True, …
 trajectories_clean = clean_trajectories(trajectories, interpolate_na=interpolate_na, interpolate_position_gaps=interpolate_na, column_labels=column_labels)
 
 max_signal_size = int(trajectories_clean[column_labels['time']].max()) + 2
+assert max_signal_size <= model_signal_length,f'The current signals are longer ({max_signal_size}) than the maximum expected input ({model_signal_length}) for this signal analysis model. Abort...'
+
 tracks = trajectories_clean[column_labels['track']].unique()
 signals = np.zeros((len(tracks),max_signal_size, len(selected_signals)))
 
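The new `model_signal_length` entry and the assert above protect the step that follows in `analyze_signals`, where every track is written into a fixed-size array before inference. A rough sketch of the idea with made-up sizes (not the exact celldetective packing code):

``` python
import numpy as np

model_signal_length = 128   # fixed input length the trained signal model expects (from its config)
max_signal_size = 96        # longest signal present in the current tables

# signals longer than the model input cannot be packed, hence the new assert
assert max_signal_size <= model_signal_length, (
    f"The current signals are longer ({max_signal_size}) than the model input ({model_signal_length})"
)

# shorter tracks are written into a zero-initialised array, frame by frame
n_tracks, n_channels = 3, 2
signals = np.zeros((n_tracks, max_signal_size, n_channels))
one_track = np.random.rand(60, n_channels)       # a single measured track, 60 frames long
signals[0, :one_track.shape[0], :] = one_track   # remaining frames stay at zero
```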
@@ -3071,7 +3073,7 @@ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], retu…
 3. Tracks with missing or NaN values in the specified signal are ignored during calculation.
 4. Tracks are aligned based on their 'FRAME' values and the specified `time_col` (if provided).
 """
-
+
 assert signal_name in list(df.columns),"The signal you want to plot is not one of the measured features."
 if isinstance(class_value,int):
 class_value = [class_value]
@@ -3080,6 +3082,11 @@
 max_duration = ceil(np.amax(df.groupby(['position','TRACK_ID']).size().values))
 else:
 max_duration = forced_max_duration
+
+abs_time = False
+if isinstance(time_col, (int,float)):
+    abs_time = True
+
 n_tracks = len(df.groupby(['position','TRACK_ID']))
 signal_matrix = np.zeros((n_tracks,int(max_duration)*2 + 1))
 signal_matrix[:,:] = np.nan
@@ -3091,11 +3098,16 @@
 cclass = track_group[class_col].to_numpy()[0]
 if cclass != 0:
 ref_time = 0
+if abs_time:
+    ref_time = time_col
 else:
-… (4 removed lines not shown in the diff view)
+if not abs_time:
+    try:
+        ref_time = floor(track_group[time_col].to_numpy()[0])
+    except:
+        continue
+else:
+    ref_time = time_col
 if conflict_mode=='mean':
 signal = track_group.groupby('FRAME')[signal_name].mean().to_numpy()
 elif conflict_mode=='first':
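With this change, `mean_signal` accepts `time_col` either as a column name (the per-track reference time, as before) or as a plain number that is used as the same absolute reference frame for every track. A simplified sketch of that dispatch on a toy table (toy column names, not the full alignment code):

``` python
import pandas as pd
from math import floor

df = pd.DataFrame({
    "TRACK_ID": [0, 0, 0, 1, 1, 1],
    "FRAME":    [0, 1, 2, 0, 1, 2],
    "signal":   [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    "t0":       [1, 1, 1, 2, 2, 2],     # per-track event time
})

def reference_time(track_group, time_col):
    # numeric time_col -> same absolute reference frame for every track
    if isinstance(time_col, (int, float)):
        return time_col
    # otherwise read the per-track value from the named column
    return floor(track_group[time_col].to_numpy()[0])

for tid, group in df.groupby("TRACK_ID"):
    print(tid, reference_time(group, "t0"), reference_time(group, 2))
# per-track references: 1 and 2 ; absolute reference: 2 for both tracks
```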
celldetective/tracking.py
CHANGED

@@ -10,7 +10,6 @@ from celldetective.measure import measure_features
 from celldetective.utils import rename_intensity_column, velocity_per_track
 from celldetective.io import view_on_napari_btrack, interpret_tracking_configuration
 
-from btrack.datasets import cell_config
 import os
 import subprocess
 
@@ -153,6 +152,8 @@ def track(labels, configuration=None, stack=None, spatial_calibration=1, feature…
 df_temp = pd.DataFrame(x_scaled, columns=columns, index = df.index)
 df[columns] = df_temp
 
+# set dummy features to NaN
+df.loc[df['dummy'],['class_id']+columns] = np.nan
 df = df.sort_values(by=[column_labels['track'],column_labels['time']])
 df = velocity_per_track(df, window_size=3, mode='bi')
 
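This masks the measured features of 'dummy' rows (objects interpolated by the tracker rather than actually detected) so they do not leak into downstream statistics. A toy illustration of the same `df.loc` masking, with invented columns:

``` python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "TRACK_ID": [0, 0, 0],
    "dummy":    [False, True, False],   # True: object interpolated by the tracker, not detected
    "class_id": [3, 3, 4],
    "area":     [120.0, 118.0, 125.0],
})

columns = ["area"]                       # measured features
df.loc[df["dummy"], ["class_id"] + columns] = np.nan
print(df)
# the dummy row keeps its track info but loses class_id and the measurements
```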
celldetective/utils.py
CHANGED

@@ -17,7 +17,6 @@ import json
 from csbdeep.utils import normalize_mi_ma
 from glob import glob
 from urllib.request import urlopen
-from urllib.parse import urlparse
 import zipfile
 from tqdm import tqdm
 import shutil
@@ -1289,16 +1288,27 @@ def _extract_channel_indices_from_config(config, channels_to_extract):
 # V2
 channels = []
 for c in channels_to_extract:
-… (5 removed lines not shown in the diff view)
-print(f"Error {e}. The channel required by the model is not available in your data... Check the configuration file.")
-channels = None
-break
-else:
+try:
+    c1 = int(ConfigSectionMap(config,"Channels")[c])
+    channels.append(c1)
+except Exception as e:
+    print(f"Warning... The channel {c} required by the model is not available in your data...")
 channels.append(None)
+if np.all([c is None for c in channels]):
+    channels = None
+
+# channels = []
+# for c in channels_to_extract:
+# if c!='None' and c is not None:
+# try:
+# c1 = int(ConfigSectionMap(config,"Channels")[c])
+# channels.append(c1)
+# except Exception as e:
+# print(f"Error {e}. The channel required by the model is not available in your data... Check the configuration file.")
+# channels = None
+# break
+# else:
+# channels.append(None)
 
 # LEGACY
 if channels is None:
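The rewritten block resolves each requested channel independently: a channel missing from the experiment config now yields a `None` placeholder instead of aborting the whole lookup, and the function only gives up when every requested channel is missing. A simplified stand-in that uses a plain dict where the real code reads the config through `ConfigSectionMap`:

``` python
import numpy as np

config_channels = {"brightfield_channel": 0, "dead_nuclei_channel": 1}  # toy config section

def channel_indices(channels_to_extract, config_channels):
    channels = []
    for c in channels_to_extract:
        try:
            channels.append(int(config_channels[c]))
        except (KeyError, TypeError, ValueError):
            print(f"Warning... The channel {c} is not available in your data...")
            channels.append(None)          # keep the slot, mark it missing
    if np.all([c is None for c in channels]):
        channels = None                    # nothing usable at all
    return channels

print(channel_indices(["brightfield_channel", "live_nuclei_channel"], config_channels))
# [0, None]
```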
@@ -2164,6 +2174,8 @@ def load_image_dataset(datasets, channels, train_spatial_calibration=None, mask_…
 - Spatial calibration adjustment involves rescaling the images and masks to match the `train_spatial_calibration`.
 - Only images with a corresponding mask and a valid configuration file specifying channel indices and
 spatial calibration are loaded.
+- The image samples must have at least one channel in common with the required channels to be accepted. The missing
+channels are passed as black frames.
 
 Examples
 --------
@@ -2202,24 +2214,29 @@
 # Load config
 with open(config_path, 'r') as f:
 config = json.load(f)
-
+
+existing_channels = config['channels']
+intersection = list(set(list(channels)) & set(list(existing_channels)))
+print(f'{existing_channels=} {intersection=}')
+if len(intersection)==0:
+    print(e,' channels could not be found in the config... Skipping image.')
+    continue
+else:
 ch_idx = []
 for c in channels:
-if c …
-idx = …
+if c in existing_channels:
+    idx = existing_channels.index(c)
 ch_idx.append(idx)
 else:
+# For None or missing channel pass black frame
 ch_idx.append(np.nan)
 im_calib = config['spatial_calibration']
-except Exception as e:
-print(e,' channels and/or spatial calibration could not be found in the config... Skipping image.')
-continue
 
 ch_idx = np.array(ch_idx)
 ch_idx_safe = np.copy(ch_idx)
 ch_idx_safe[ch_idx_safe!=ch_idx_safe] = 0
 ch_idx_safe = ch_idx_safe.astype(int)
-
+
 image = image[ch_idx_safe]
 image[np.where(ch_idx!=ch_idx)[0],:,:] = 0
 
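Together with the docstring note above, this hunk makes `load_image_dataset` keep a sample as long as at least one requested channel exists in its config, and fills the missing channels with black frames via the NaN / `ch_idx_safe` indexing trick. A compact sketch of that trick with made-up channel names and shapes:

``` python
import numpy as np

existing_channels = ["brightfield_channel", "live_nuclei_channel"]
requested = ["brightfield_channel", "dead_nuclei_channel"]    # second one is missing

image = np.random.rand(len(existing_channels), 4, 4)          # channel-first toy image

ch_idx = np.array([existing_channels.index(c) if c in existing_channels else np.nan
                   for c in requested])
ch_idx_safe = np.copy(ch_idx)
ch_idx_safe[ch_idx_safe != ch_idx_safe] = 0                   # NaN != NaN -> temporary index 0
ch_idx_safe = ch_idx_safe.astype(int)

selected = image[ch_idx_safe]                                 # gather the requested channels
selected[np.where(ch_idx != ch_idx)[0], :, :] = 0             # blank out the missing ones

print(selected.shape, selected[1].max())                      # (2, 4, 4) 0.0
```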
@@ -2233,6 +2250,14 @@
 
 X.append(image)
 Y.append(mask)
+
+# fig,ax = plt.subplots(1,image.shape[-1]+1)
+# for k in range(image.shape[-1]):
+# ax[k].imshow(image[:,:,k],cmap='gray')
+# ax[image.shape[-1]].imshow(mask)
+# plt.pause(1)
+# plt.close()
+
 files.append(im)
 
 assert len(X)==len(Y),'The number of images does not match with the number of masks... Abort.'
@@ -2367,4 +2392,87 @@ def interpolate_nan(img, method='nearest'):
 interp_grid = griddata(points, values, (x_grid, y_grid), method=method)
 return interp_grid
 else:
-return img
+return img
+
+def collapse_trajectories_by_status(df, status=None, projection='mean', population='effectors', groupby_columns=['position','TRACK_ID']):
+
+    static_columns = ['well_index', 'well_name', 'pos_name', 'position', 'well', 'status', 't0', 'class','cell_type','concentration', 'antibody', 'pharmaceutical_agent','TRACK_ID','position', 'neighbor_population', 'reference_population', 'NEIGHBOR_ID', 'REFERENCE_ID', 'FRAME']
+
+    if status is None or status not in list(df.columns):
+        print('invalid status selection...')
+        return None
+
+    df = df.dropna(subset=status,ignore_index=True)
+    unique_statuses = np.unique(df[status].to_numpy())
+
+    df_sections = []
+    for s in unique_statuses:
+        subtab = df.loc[df[status]==s,:]
+        op = getattr(subtab.groupby(groupby_columns), projection)
+        subtab_projected = op(subtab.groupby(groupby_columns))
+        frame_duration = subtab.groupby(groupby_columns).size().to_numpy()
+        for c in static_columns:
+            try:
+                subtab_projected[c] = subtab.groupby(groupby_columns)[c].apply(lambda x: x.unique()[0])
+            except Exception as e:
+                print(e)
+                pass
+        subtab_projected['duration_in_state'] = frame_duration
+        df_sections.append(subtab_projected)
+
+    group_table = pd.concat(df_sections,axis=0,ignore_index=True)
+    if population=='pairs':
+        for col in ['duration_in_state',status, 'neighbor_population', 'reference_population', 'NEIGHBOR_ID', 'REFERENCE_ID']:
+            first_column = group_table.pop(col)
+            group_table.insert(0, col, first_column)
+    else:
+        for col in ['duration_in_state',status,'TRACK_ID']:
+            first_column = group_table.pop(col)
+            group_table.insert(0, col, first_column)
+
+    group_table.pop('FRAME')
+    group_table = group_table.sort_values(by=groupby_columns + [status],ignore_index=True)
+    group_table = group_table.reset_index(drop=True)
+
+    return group_table
+
+def step_function(t, t_shift, dt):
+
+    """
+    Computes a step function using the logistic sigmoid function.
+
+    This function calculates the value of a sigmoid function, which is often used to model
+    a step change or transition. The sigmoid function is defined as:
+
+    .. math::
+        f(t) = \\frac{1}{1 + \\exp{\\left( -\\frac{t - t_{shift}}{dt} \\right)}}
+
+    where `t` is the input variable, `t_shift` is the point of the transition, and `dt` controls
+    the steepness of the transition.
+
+    Parameters
+    ----------
+    t : array_like
+        The input values for which the step function will be computed.
+    t_shift : float
+        The point in the `t` domain where the transition occurs.
+    dt : float
+        The parameter that controls the steepness of the transition. Smaller values make the
+        transition steeper, while larger values make it smoother.
+
+    Returns
+    -------
+    array_like
+        The computed values of the step function for each value in `t`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> t = np.array([0, 1, 2, 3, 4, 5])
+    >>> t_shift = 2
+    >>> dt = 1
+    >>> step_function(t, t_shift, dt)
+    array([0.26894142, 0.37754067, 0.5       , 0.62245933, 0.73105858, 0.81757448])
+    """
+
+    return 1/(1+np.exp(-(t-t_shift)/dt))
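The new `collapse_trajectories_by_status` reduces a frame-by-frame trajectory table to one row per track and per status value, recording how long each track spent in that state. A much-simplified pandas sketch of the same idea (toy columns and a single measured signal; the real function also copies static columns, reorders them and handles pair tables):

``` python
import pandas as pd

df = pd.DataFrame({
    "position": ["W1_100"] * 6,
    "TRACK_ID": [0, 0, 0, 1, 1, 1],
    "FRAME":    [0, 1, 2, 0, 1, 2],
    "status":   [0, 1, 1, 0, 0, 0],
    "signal":   [1.0, 2.0, 4.0, 3.0, 3.0, 3.0],
})

rows = []
for s, subtab in df.groupby("status"):
    grouped = subtab.groupby(["position", "TRACK_ID"])
    projected = grouped["signal"].mean().reset_index()       # 'mean' projection of the measurements
    projected["status"] = s
    projected["duration_in_state"] = grouped.size().values   # number of frames spent in this state
    rows.append(projected)

group_table = pd.concat(rows, ignore_index=True).sort_values(["position", "TRACK_ID", "status"])
print(group_table)
# one row per (position, TRACK_ID, status) with the mean signal and the state duration
```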
{celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: celldetective
-Version: 1.2.…
+Version: 1.2.2
 Summary: description
 Home-page: http://github.com/remyeltorro/celldetective
 Author: Rémy Torro
@@ -12,14 +12,14 @@ Requires-Dist: wheel
 Requires-Dist: nbsphinx
 Requires-Dist: nbsphinx-link
 Requires-Dist: sphinx-rtd-theme
-Requires-Dist: sphinx
-Requires-Dist: jinja2
+Requires-Dist: sphinx
+Requires-Dist: jinja2
 Requires-Dist: ipykernel
 Requires-Dist: stardist
 Requires-Dist: cellpose<3
 Requires-Dist: scikit-learn
 Requires-Dist: btrack
-Requires-Dist: tensorflow
+Requires-Dist: tensorflow~=2.15.0
 Requires-Dist: napari
 Requires-Dist: tqdm
 Requires-Dist: mahotas
@@ -32,9 +32,10 @@ Requires-Dist: seaborn
 Requires-Dist: opencv-python-headless==4.7.0.72
 Requires-Dist: liblapack
 Requires-Dist: gputools
-Requires-Dist: lmfit
-Requires-Dist: superqt[cmap]
+Requires-Dist: lmfit
+Requires-Dist: superqt[cmap]
 Requires-Dist: matplotlib-scalebar
+Requires-Dist: numpy==1.26.4
 
 # Celldetective
 
@@ -165,23 +166,25 @@ steps.
 
 To use the software, you must install python, *e.g.* through
 [Anaconda](https://www.anaconda.com/download). We developed and tested
-the software in Python 3.9.
+the software in Python 3.9 and more recently 3.11.
 
 # Installation
 
 ## Stable release
 
-Celldetective …
+Celldetective requires a version of Python between 3.9 and 3.11 (included). If your Python version is older or more recent, consider using `conda` to create an environment as described below.
+
+With the proper Python version, Celldetective can be directly installed with `pip`:
 
 ``` bash
 pip install celldetective
 ```
 
-We recommend that you create an environment to use Celldetective, *e.g.*
+We recommend that you create an environment to use Celldetective, to protect your package versions and fix the Python version *e.g.*
 with `conda`:
 
 ``` bash
-conda create -n celldetective python=3.…
+conda create -n celldetective python=3.11 pyqt
 conda activate celldetective
 pip install celldetective
 ```
@@ -207,6 +210,10 @@ will be immediately available in the python environment:
 git clone git://github.com/remyeltorro/celldetective.git
 cd celldetective
 
+# optional: create an environment
+conda create -n celldetective python=3.11 pyqt
+conda activate celldetective
+
 # install the celldetective package in editable/development mode
 pip install -r requirements.txt
 pip install -e .
@@ -229,10 +236,10 @@ with package requirements for other projects. Run the following lines to
 create an environment named "celldetective":
 
 ``` bash
-conda create -n celldetective python=3.…
+conda create -n celldetective python=3.11 pyqt
 conda activate celldetective
 pip install -r requirements.txt
-pip install .
+pip install -e .
 ```
 
 The installation of the dependencies will take a few minutes (up to half
|