celldetective 1.1.0__py3-none-any.whl → 1.1.1.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/__main__.py +5 -19
- celldetective/extra_properties.py +63 -53
- celldetective/filters.py +39 -11
- celldetective/gui/classifier_widget.py +56 -7
- celldetective/gui/control_panel.py +5 -0
- celldetective/gui/layouts.py +3 -2
- celldetective/gui/measurement_options.py +13 -109
- celldetective/gui/plot_signals_ui.py +1 -0
- celldetective/gui/process_block.py +1 -1
- celldetective/gui/survival_ui.py +7 -1
- celldetective/gui/tableUI.py +294 -28
- celldetective/gui/thresholds_gui.py +51 -10
- celldetective/gui/viewers.py +169 -22
- celldetective/io.py +41 -17
- celldetective/measure.py +13 -238
- celldetective/models/segmentation_effectors/primNK_cfse/config_input.json +29 -0
- celldetective/models/segmentation_effectors/primNK_cfse/cp-cfse-transfer +0 -0
- celldetective/models/segmentation_effectors/primNK_cfse/training_instructions.json +37 -0
- celldetective/neighborhood.py +4 -1
- celldetective/preprocessing.py +483 -143
- celldetective/scripts/segment_cells.py +26 -7
- celldetective/scripts/train_segmentation_model.py +35 -34
- celldetective/segmentation.py +29 -20
- celldetective/signals.py +13 -231
- celldetective/tracking.py +2 -1
- celldetective/utils.py +440 -26
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/METADATA +1 -1
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/RECORD +34 -30
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/WHEEL +1 -1
- tests/test_preprocessing.py +37 -0
- tests/test_utils.py +48 -1
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/LICENSE +0 -0
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/entry_points.txt +0 -0
- {celldetective-1.1.0.dist-info → celldetective-1.1.1.post1.dist-info}/top_level.txt +0 -0
celldetective/__main__.py
CHANGED
@@ -54,6 +54,11 @@ class AppInitWindow(QMainWindow):
         self.setCentralWidget(central_widget)
         self.reload_previous_gpu_threads()
         self.show()
+
+    def closeEvent(self, event):
+        QApplication.closeAllWindows()
+        event.accept()
+        gc.collect()
 
     def create_locate_exp_hbox(self):
 
@@ -365,25 +370,6 @@ class AppInitWindow(QMainWindow):
         if returnValue == QMessageBox.Ok:
             self.experiment_path_selection.setText('')
         return None
-
-    def closeEvent(self, event):
-
-        """
-        Close child windows if closed.
-        """
-
-        try:
-            if self.control_panel:
-                self.control_panel.close()
-        except:
-            pass
-        try:
-            if self.new_exp_window:
-                self.new_exp_window.close()
-        except:
-            pass
-
-        gc.collect()
 
 if __name__ == "__main__":
 
celldetective/extra_properties.py
CHANGED
@@ -7,7 +7,7 @@ If intensity is in function name, it will be replaced by the name of the channel
 import warnings
 
 import numpy as np
-from scipy.ndimage import distance_transform_edt
+from scipy.ndimage import distance_transform_edt, center_of_mass
 from scipy.spatial.distance import euclidean
 
 
@@ -34,67 +34,77 @@ def intensity_percentile_twenty_five(regionmask, intensity_image):
 # STD
 
 def intensity_std(regionmask, intensity_image):
-
+    return np.nanstd(intensity_image[regionmask])
 
 
 def intensity_median(regionmask, intensity_image):
-
+    return np.nanmedian(intensity_image[regionmask])
 
+def intensity_nanmean(regionmask, intensity_image):
+    return np.nanmean(intensity_image[regionmask])
 
 def intensity_centre_of_mass_displacement(regionmask, intensity_image):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    y, x = np.mgrid[:regionmask.shape[0], :regionmask.shape[1]]
+    xtemp = x.copy()
+    ytemp = y.copy()
+    intensity_weighted_center = center_of_mass(intensity_image, regionmask)
+    centroid_x = intensity_weighted_center[1]
+    centroid_y = intensity_weighted_center[0]
+
+    geometric_centroid_x = np.sum(xtemp * regionmask) / np.sum(regionmask)
+    geometric_centroid_y = np.sum(ytemp * regionmask) / np.sum(regionmask)
+    distance = euclidean(np.array((geometric_centroid_y, geometric_centroid_x)), np.array((centroid_y, centroid_x)))
+    delta_x = geometric_centroid_x - centroid_x
+    delta_y = geometric_centroid_y - centroid_y
+    direction_arctan = np.arctan2(delta_y, delta_x) * 180 / np.pi
+    if direction_arctan < 0:
+        direction_arctan += 360
+    return distance, direction_arctan
 
 def intensity_radial_gradient(regionmask, intensity_image):
-
-
-
-
-
-
-
-
+    warnings.filterwarnings('ignore', message="Polyfit may be poorly conditioned")
+    cell_mask = regionmask.copy()
+    intensity = intensity_image.copy()
+    y = intensity[cell_mask].flatten()
+    x = distance_transform_edt(cell_mask)
+    x = x[cell_mask].flatten()
+    params = np.polyfit(x, y, 1)
+    line = np.poly1d(params)
 
-
+    return line.coefficients[0], line.coefficients[1]
 
 
 def intensity_centre_of_mass_displacement_edge(regionmask, intensity_image):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    edt = distance_transform_edt(regionmask)
+    min_distance = 0
+    max_distance = 0.1*edt.max()
+    thresholded = (edt <= max_distance) * (edt > min_distance)
+    edge_mask = np.copy(regionmask)
+    edge_mask[np.where(thresholded == 0)] = 0
+    y, x = np.mgrid[:edge_mask.shape[0], :edge_mask.shape[1]]
+    xtemp = x.copy()
+    ytemp = y.copy()
+    intensity_edge = intensity_image.copy()
+    intensity_edge[np.where(edge_mask == 0)] = 0.
+    sum_intensity_edge = np.sum(intensity_edge)
+    sum_regionmask = np.sum(regionmask)
+
+    if sum_intensity_edge != 0 and sum_regionmask != 0:
+        y, x = np.mgrid[:regionmask.shape[0], :regionmask.shape[1]]
+        xtemp = x.copy()
+        ytemp = y.copy()
+        intensity_weighted_center = center_of_mass(intensity_image, regionmask)
+        centroid_x = intensity_weighted_center[1]
+        centroid_y = intensity_weighted_center[0]
+
+        geometric_centroid_x = np.sum(xtemp * regionmask) / np.sum(regionmask)
+        geometric_centroid_y = np.sum(ytemp * regionmask) / np.sum(regionmask)
+        distance = euclidean(np.array((geometric_centroid_y, geometric_centroid_x)), np.array((centroid_y, centroid_x)))
+        delta_x = geometric_centroid_x - centroid_x
+        delta_y = geometric_centroid_y - centroid_y
+        direction_arctan = np.arctan2(delta_y, delta_x) * 180 / np.pi
+        if direction_arctan < 0:
+            direction_arctan += 360
+        return distance, direction_arctan
+    else:
+        return np.nan, np.nan
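The functions in extra_properties.py follow scikit-image's extra-property convention: each takes (regionmask, intensity_image) and returns one or more scalars per cell. The sketch below (not part of the package; the toy labels and intensity arrays are invented for illustration) shows how such a function, here the newly added intensity_nanmean, plugs into skimage.measure.regionprops_table.

import numpy as np
from skimage.measure import regionprops_table

def intensity_nanmean(regionmask, intensity_image):
    # NaN-aware mean intensity inside the cell mask, as added in this release.
    return np.nanmean(intensity_image[regionmask])

labels = np.zeros((16, 16), dtype=int)
labels[4:12, 4:12] = 1                                  # one square "cell"
intensity = np.random.default_rng(0).random((16, 16))
intensity[5, 5] = np.nan                                # NaN pixels no longer break the measure

table = regionprops_table(labels, intensity_image=intensity,
                          properties=('label', 'area'),
                          extra_properties=(intensity_nanmean,))
print(table['intensity_nanmean'])                       # one value per labelled cell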
celldetective/filters.py
CHANGED
@@ -1,21 +1,36 @@
 from skimage.filters import difference_of_gaussians, threshold_otsu, threshold_local, threshold_niblack, threshold_sauvola
+from celldetective.utils import interpolate_nan
 import scipy.ndimage as snd
 import numpy as np
 
-def gauss_filter(img, sigma, *kwargs):
+def gauss_filter(img, sigma, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
     return snd.gaussian_filter(img.astype(float), sigma, *kwargs)
 
-def median_filter(img, size, *kwargs):
+def median_filter(img, size, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
+
     size = int(size)
     return snd.median_filter(img, size, *kwargs)
 
-def maximum_filter(img, size, *kwargs):
+def maximum_filter(img, size, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
+
     return snd.maximum_filter(img.astype(float), size, *kwargs)
 
-def minimum_filter(img, size, *kwargs):
+def minimum_filter(img, size, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
+
     return snd.minimum_filter(img.astype(float), size, *kwargs)
 
-def percentile_filter(img, percentile, size, *kwargs):
+def percentile_filter(img, percentile, size, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
+
     return snd.percentile_filter(img.astype(float), percentile, size, *kwargs)
 
 def subtract_filter(img, value, *kwargs):
@@ -24,14 +39,19 @@ def subtract_filter(img, value, *kwargs):
 def abs_filter(img, *kwargs):
     return np.abs(img)
 
-def ln_filter(img, *kwargs):
+def ln_filter(img, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
 
     img[np.where(img>0.)] = np.log(img[np.where(img>0.)])
     img[np.where(img<=0.)] = 0.
 
     return img
 
-def variance_filter(img, size):
+def variance_filter(img, size, interpolate=True):
+
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
 
     size = int(size)
     img = img.astype(float)
@@ -41,8 +61,10 @@ def variance_filter(img, size):
 
     return img
 
-def std_filter(img, size):
+def std_filter(img, size, interpolate=True):
 
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
     size = int(size)
     img = img.astype(float)
     win_mean = snd.uniform_filter(img, (size,size), mode='wrap')
@@ -53,10 +75,14 @@ def std_filter(img, size):
 
     return img
 
-def laplace_filter(img, output=float, *kwargs):
+def laplace_filter(img, output=float, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
     return snd.laplace(img.astype(float), *kwargs)
 
-def dog_filter(img, sigma_low, sigma_high, *kwargs):
+def dog_filter(img, sigma_low, sigma_high, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
     return difference_of_gaussians(img.astype(float), sigma_low, sigma_high, *kwargs)
 
 def otsu_filter(img, *kwargs):
@@ -82,7 +108,9 @@ def sauvola_filter(img, *kwargs):
 def log_filter(img, sigma, *kwargs):
     return snd.gaussian_laplace(img.astype(float), sigma, *kwargs)
 
-def tophat_filter(img, size, connectivity=4, *kwargs):
+def tophat_filter(img, size, connectivity=4, interpolate=True, *kwargs):
+    if interpolate:
+        img = interpolate_nan(img.astype(float))
     structure = snd.generate_binary_structure(rank=2, connectivity=connectivity)
     img = snd.white_tophat(img.astype(float), structure=structure, size=size, *kwargs)
     return img
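These filters now take an interpolate flag and fill NaN pixels (via celldetective.utils.interpolate_nan) before calling the underlying scipy routine, so a single NaN no longer propagates through the whole kernel footprint. Below is a rough standalone sketch of the idea, with a nearest-neighbour fill standing in for interpolate_nan; the stub is an assumption about how the package utility behaves, not copied from it.

import numpy as np
import scipy.ndimage as snd

def interpolate_nan_stub(img):
    # Replace each NaN pixel with the value of the nearest valid pixel.
    mask = np.isnan(img)
    idx = snd.distance_transform_edt(mask, return_distances=False, return_indices=True)
    return img[tuple(idx)]

def gauss_filter(img, sigma, interpolate=True):
    if interpolate:
        img = interpolate_nan_stub(img.astype(float))
    return snd.gaussian_filter(img.astype(float), sigma)

frame = np.random.default_rng(0).random((64, 64))
frame[10, 10] = np.nan
smoothed = gauss_filter(frame, sigma=2)
print(np.isnan(smoothed).any())  # False: the NaN no longer contaminates the smoothed output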
celldetective/gui/classifier_widget.py
CHANGED
@@ -136,9 +136,10 @@ class ClassifierWidget(QWidget, Styles):
 
         self.irreversible_event_btn = QRadioButton('irreversible event')
         self.unique_state_btn = QRadioButton('unique state')
-        self.time_corr_options = [self.irreversible_event_btn, self.unique_state_btn]
         time_corr_btn_group = QButtonGroup()
         self.unique_state_btn.click()
+        self.time_corr_options = [self.irreversible_event_btn, self.unique_state_btn]
+
         for btn in self.time_corr_options:
             time_corr_btn_group.addButton(btn)
             btn.setEnabled(False)
@@ -148,8 +149,28 @@ class ClassifierWidget(QWidget, Styles):
         time_corr_layout.addWidget(self.irreversible_event_btn, 50,alignment=Qt.AlignCenter)
         layout.addLayout(time_corr_layout)
 
+        self.r2_slider = QLabeledDoubleSlider()
+        self.r2_slider.setValue(0.75)
+        self.r2_slider.setRange(0,1)
+        self.r2_slider.setSingleStep(0.01)
+        self.r2_slider.setOrientation(1)
+        self.r2_label = QLabel('R2 tolerance:')
+        self.r2_label.setToolTip('Minimum R2 between the fit sigmoid and the binary response to the filters to accept the event.')
+        r2_threshold_layout = QHBoxLayout()
+        r2_threshold_layout.addWidget(QLabel(''), 50)
+        r2_threshold_layout.addWidget(self.r2_label, 15)
+        r2_threshold_layout.addWidget(self.r2_slider, 35)
+        layout.addLayout(r2_threshold_layout)
+
+        self.irreversible_event_btn.clicked.connect(self.activate_r2)
+        self.unique_state_btn.clicked.connect(self.activate_r2)
+
+        for wg in [self.r2_slider, self.r2_label]:
+            wg.setEnabled(False)
+
         layout.addWidget(QLabel())
 
+
         self.submit_btn = QPushButton('apply')
         self.submit_btn.setStyleSheet(self.button_style_sheet)
         self.submit_btn.clicked.connect(self.submit_classification)
@@ -158,14 +179,30 @@ class ClassifierWidget(QWidget, Styles):
         self.frame_slider.valueChanged.connect(self.set_frame)
         self.alpha_slider.valueChanged.connect(self.set_transparency)
 
+    def activate_r2(self):
+        if self.irreversible_event_btn.isChecked() and self.time_corr.isChecked():
+            for wg in [self.r2_slider, self.r2_label]:
+                wg.setEnabled(True)
+        else:
+            for wg in [self.r2_slider, self.r2_label]:
+                wg.setEnabled(False)
+
     def activate_time_corr_options(self):
 
         if self.time_corr.isChecked():
             for btn in self.time_corr_options:
                 btn.setEnabled(True)
+            if self.irreversible_event_btn.isChecked():
+                for wg in [self.r2_slider, self.r2_label]:
+                    wg.setEnabled(True)
+            else:
+                for wg in [self.r2_slider, self.r2_label]:
+                    wg.setEnabled(False)
         else:
             for btn in self.time_corr_options:
                 btn.setEnabled(False)
+            for wg in [self.r2_slider, self.r2_label]:
+                wg.setEnabled(False)
 
     def init_class(self):
 
@@ -207,8 +244,20 @@ class ClassifierWidget(QWidget, Styles):
         self.scat_props.set_alpha(self.currentAlpha)
         self.ax_props.set_xlabel(self.features_cb[1].currentText())
         self.ax_props.set_ylabel(self.features_cb[0].currentText())
-
-
+
+
+        feat_x = self.features_cb[1].currentText()
+        feat_y = self.features_cb[0].currentText()
+        min_x = self.df.dropna(subset=feat_x)[feat_x].min()
+        max_x = self.df.dropna(subset=feat_x)[feat_x].max()
+        min_y = self.df.dropna(subset=feat_y)[feat_y].min()
+        max_y = self.df.dropna(subset=feat_y)[feat_y].max()
+
+        if min_x==min_x and max_x==max_x:
+            self.ax_props.set_xlim(min_x, max_x)
+        if min_y==min_y and max_y==max_y:
+            self.ax_props.set_ylim(min_y, max_y)
+
         if feature_changed:
             self.propscanvas.canvas.toolbar.update()
             self.propscanvas.canvas.draw_idle()
@@ -400,15 +449,16 @@ class ClassifierWidget(QWidget, Styles):
             timeline = group['FRAME'].values
 
             try:
-                popt, pcov = curve_fit(step_function, timeline, status_signal,p0=[self.df['FRAME'].max()//2, 0.
-
+                popt, pcov = curve_fit(step_function, timeline.astype(int), status_signal, p0=[self.df['FRAME'].max()//2, 0.8],maxfev=30000)
+                values = [step_function(t, *popt) for t in timeline]
+                r2 = r2_score(status_signal,values)
             except Exception as e:
                 print(e)
                 self.df.loc[indices, self.class_name_user] = 2.0
                 self.df.loc[indices, self.class_name_user.replace('class','t')] = -1
                 continue
 
-            if r2 >
+            if r2 > float(self.r2_slider.value()):
                 t0 = popt[0]
                 self.df.loc[indices, self.class_name_user.replace('class','t')] = t0
                 self.df.loc[indices, self.class_name_user] = 0.0
@@ -426,4 +476,3 @@ class ClassifierWidget(QWidget, Styles):
 
 
 
-
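The new R2 slider gates the irreversible-event fit shown above: a step-shaped function is fitted to each track's binary status signal and the estimated onset time t0 is only kept when the fit's R² exceeds the slider value (0.75 by default). Here is a self-contained sketch of that logic; the sigmoid form of step_function is an assumption suggested by the tooltip, not copied from the package, and the fallback values are illustrative.

import numpy as np
from scipy.optimize import curve_fit
from scipy.special import expit
from sklearn.metrics import r2_score

def step_function(t, t0, k):
    # Smooth 0 -> 1 transition centred on t0 with slope k (assumed form).
    return expit(k * (np.asarray(t, dtype=float) - t0))

timeline = np.arange(60)
status_signal = (timeline >= 25).astype(float)      # binary response of one track to the filters
status_signal[40] = 0.0                             # one mis-classified frame

popt, _ = curve_fit(step_function, timeline.astype(int), status_signal,
                    p0=[len(timeline) // 2, 0.8], maxfev=30000)
r2 = r2_score(status_signal, [step_function(t, *popt) for t in timeline])

r2_tolerance = 0.75                                 # default of the new slider
if r2 > r2_tolerance:
    event_class, t0 = 0.0, popt[0]                  # accepted irreversible event, onset at t0
else:
    event_class, t0 = 2.0, -1                       # rejected fit; the widget's exact fallback is outside this hunk
print(round(r2, 3), event_class, t0)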
celldetective/gui/layouts.py
CHANGED
@@ -192,6 +192,7 @@ class BackgroundFitCorrectionLayout(QGridLayout, Styles):
             clip = clip,
             export= False,
             return_stacks=True,
+            activation_protocol=[['gauss',2],['std',4]],
             show_progress_per_well = True,
             show_progress_per_pos = False,
             )
@@ -701,12 +702,12 @@ class BackgroundModelFreeCorrectionLayout(QGridLayout, Styles):
             show_progress_per_pos = False,
             )
 
-
         self.viewer = StackVisualizer(
             stack=corrected_stacks[0],
             window_title='Corrected channel',
             frame_slider = True,
-            contrast_slider = True
+            contrast_slider = True,
+            target_channel=self.channels_cb.currentIndex(),
             )
         self.viewer.show()
 
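The new activation_protocol argument passed to the model-free background correction is a list of [filter_name, size] steps. The sketch below is a hedged guess at how such a protocol could chain the filters from celldetective/filters.py (a Gaussian blur followed by a local standard deviation, matching ['gauss',2] then ['std',4]); the real dispatch lives inside the correction code, which is not part of this excerpt, so the mapping and helper names are illustrative only.

import numpy as np
import scipy.ndimage as snd

def gauss(img, sigma):
    return snd.gaussian_filter(img.astype(float), sigma)

def std(img, size):
    # Local standard deviation in a size x size window, mirroring std_filter above.
    size = int(size)
    img = img.astype(float)
    win_mean = snd.uniform_filter(img, (size, size), mode='wrap')
    win_sq_mean = snd.uniform_filter(img ** 2, (size, size), mode='wrap')
    return np.sqrt(np.clip(win_sq_mean - win_mean ** 2, 0.0, None))

FILTERS = {'gauss': gauss, 'std': std}

frame = np.random.default_rng(0).random((128, 128))
for name, size in [['gauss', 2], ['std', 4]]:   # the protocol added in layouts.py
    frame = FILTERS[name](frame, size)          # each step feeds the next
print(frame.shape, float(frame.mean()))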
celldetective/gui/measurement_options.py
CHANGED
@@ -21,8 +21,7 @@ from fonticon_mdi6 import MDI6
 from celldetective.gui.thresholds_gui import ThresholdNormalisation, ThresholdSpot
 from celldetective.utils import extract_experiment_channels, get_software_location
 from celldetective.io import interpret_tracking_configuration, load_frames, auto_load_number_of_frames
-from celldetective.measure import compute_haralick_features, contour_of_instance_segmentation,
-    field_normalisation, normalise_by_cell
+from celldetective.measure import compute_haralick_features, contour_of_instance_segmentation, normalise_by_cell
 import numpy as np
 from tifffile import imread
 import json
@@ -736,6 +735,17 @@ class ConfigMeasurements(QMainWindow, Styles):
             return None
         else:
             self.current_stack = movies[0]
+            self.stack_length = auto_load_number_of_frames(self.current_stack)
+
+            if self.stack_length is None:
+                stack = imread(self.current_stack)
+                self.stack_length = len(stack)
+                del stack
+                gc.collect()
+
+            self.mid_time = self.stack_length // 2
+            indices = self.mid_time + np.arange(len(self.channel_names))
+            self.test_frame = load_frames(list(indices.astype(int)),self.current_stack, normalize_input=False)
 
 
     def control_haralick_digitalization(self):
@@ -824,7 +834,7 @@ class ConfigMeasurements(QMainWindow, Styles):
         Load the first mask of the detected movie.
         """
 
-        labels_path = str(Path(self.
+        labels_path = str(Path(self.current_stack).parent.parent) + os.sep+f'labels_{self.mode}'+os.sep
         masks = natsorted(glob(labels_path + '*.tif'))
         if len(masks) == 0:
             print('no mask found')
@@ -859,47 +869,6 @@ class ConfigMeasurements(QMainWindow, Styles):
         self.im_mask.set_alpha(value)
         self.fig_contour.canvas.draw_idle()
 
-    # def populate_normalisation_tabs(self):
-
-    # """
-    # Multi-tab options to perform background correction before measurements.
-    # """
-
-    # layout = QVBoxLayout(self.normalisation_frame)
-
-    # self.normalisation_lbl = QLabel("BACKGROUND CORRECTION")
-    # self.normalisation_lbl.setStyleSheet("""
-    # font-weight: bold;
-    # padding: 0px;
-    # """)
-    # layout.addWidget(self.normalisation_lbl, alignment=Qt.AlignCenter)
-
-
-    # self.tabs = QTabWidget()
-    # self.tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
-
-    # self.tab1, self.tab2 = QWidget(), QWidget()
-    # self.normalisation_list = QListWidget()
-    # self.tabs.addTab(self.tab1, 'Local')
-    # self.tabs.addTab(self.tab2, 'Field')
-    # self.local_correction_layout = LocalCorrectionLayout(self, self.tab1)
-    # self.fit_correction_layout = BackgroundFitCorrectionLayout(self, self.tab2)
-    # layout.addWidget(self.tabs)
-
-    # self.norm_list_lbl = QLabel('Background correction to perform:')
-    # hbox = QHBoxLayout()
-    # hbox.addWidget(self.norm_list_lbl)
-    # self.del_norm_btn = QPushButton("")
-    # self.del_norm_btn.setStyleSheet(self.button_select_all)
-    # self.del_norm_btn.setIcon(icon(MDI6.trash_can, color="black"))
-    # self.del_norm_btn.setToolTip("Remove background correction")
-    # self.del_norm_btn.setIconSize(QSize(20, 20))
-    # hbox.addWidget(self.del_norm_btn, alignment=Qt.AlignRight)
-    # layout.addLayout(hbox)
-    # self.del_norm_btn.clicked.connect(self.remove_item_from_list)
-    # layout.addWidget(self.normalisation_list)
-
-
     def remove_item_from_list(self):
         current_item = self.normalisation_list.currentRow()
         if current_item > -1:
@@ -935,47 +904,6 @@ class ConfigMeasurements(QMainWindow, Styles):
     def fun(self, x, y):
         return x ** 2 + y
 
-    # def preview_normalisation(self):
-    # plt.close('all')
-    # plt.figure("Intensity Profiles",figsize=(10, 5))
-    # self.locate_image()
-    # diagonal_length = min(self.test_frame[:, :, self.tab2_channel_dropdown.currentIndex()].shape[0], self.test_frame[:, :, self.tab2_channel_dropdown.currentIndex()].shape[1])
-    # if self.tab2_subtract.isChecked():
-    # norm_operation='Subtract'
-    # else:
-    # norm_operation='Divide'
-    # normalised, bg_fit = field_normalisation(self.test_frame[:, :, self.tab2_channel_dropdown.currentIndex()],
-    # threshold=self.tab2_txt_threshold.text(),
-    # normalisation_operation=norm_operation,
-    # clip=self.tab2_clip.isChecked(),
-    # mode=self.tab2_dropdown.currentText())
-    # diagonal_original = [self.test_frame[:, :, self.tab2_channel_dropdown.currentIndex()][i, i] for i in
-    # range(diagonal_length)]
-    # diagonal_corrected = [normalised[i, i] for i in range(diagonal_length)]
-    # diagonal_indices = np.arange(diagonal_length)
-
-    # plt.subplot(1, 2, 1)
-    # plt.plot(diagonal_indices, diagonal_original, color='black', linewidth=0.2) # Adjust linewidth here
-    # plt.title('Original Image')
-    # plt.xlabel('Pixel Index along Diagonal')
-    # plt.ylabel('Intensity')
-
-    # plt.subplot(1, 2, 2)
-    # plt.plot(diagonal_indices, diagonal_corrected, color='black', linewidth=0.2) # Adjust linewidth here
-    # plt.title('Corrected Image')
-    # plt.xlabel('Pixel Index along Diagonal')
-    # plt.ylabel('Intensity')
-
-    # plt.tight_layout()
-    # plt.show()
-
-    # self.fig, self.ax = plt.subplots()
-    # self.normalised_img = FigureCanvas(self.fig, "Corrected background image preview")
-    # self.ax.clear()
-    # self.ax.imshow(normalised, cmap='gray')
-    # self.normalised_img.canvas.draw()
-    # self.normalised_img.show()
-
     def view_normalisation_contour(self):
 
         """
@@ -1061,30 +989,6 @@ class ConfigMeasurements(QMainWindow, Styles):
         if self.test_mask is not None:
             self.spot_visual = ThresholdSpot(current_channel=self.spot_channel.currentIndex(), img=self.test_frame,
                 mask=self.test_mask, parent_window=self)
-        # for dictionary in self.background_correction:
-        # if self.spot_channel.currentText() in dictionary['target channel']:
-        # if dictionary['mode'] == 'field':
-        # if dictionary['operation'] == 'Divide':
-        # normalised, bg_fit = field_normalisation(
-        # self.test_frame[:, :, self.spot_channel.currentIndex()],
-        # threshold=dictionary['threshold'],
-        # normalisation_operation=dictionary['operation'],
-        # clip=False,
-        # mode=dictionary['type'])
-        # else:
-        # normalised, bg_fit = field_normalisation(
-        # self.test_frame[:, :, self.spot_channel.currentIndex()],
-        # threshold=dictionary['threshold'],
-        # normalisation_operation=dictionary['operation'],
-        # clip=dictionary['clip'],
-        # mode=dictionary['type'])
-        # self.test_frame[:, :, self.spot_channel.currentIndex()] = normalised
-        # if dictionary['mode'] == 'local':
-        # normalised_image = normalise_by_cell(self.test_frame[:, :, self.spot_channel.currentIndex()].copy(), self.test_mask,
-        # distance=int(dictionary['distance']), mode=dictionary['type'],
-        # operation=dictionary['operation'])
-        # self.test_frame[:, :, self.spot_channel.currentIndex()] = normalised_image
-
 
     def enable_spot_detection(self):
         if self.spot_check.isChecked():
@@ -1288,7 +1288,7 @@ class PreprocessingPanel(QFrame, Styles):
         correct_background_model_free(self.exp_dir,
             well_option=well_option,
             position_option=pos_option,
-            export= True,
+            export = True,
             return_stacks=False,
             show_progress_per_well = True,
             show_progress_per_pos = True,
celldetective/gui/survival_ui.py
CHANGED
@@ -28,6 +28,7 @@ import matplotlib.cm as mcm
 import math
 from celldetective.events import switch_to_events
 from celldetective.gui import Styles
+from matplotlib import colormaps
 
 class ConfigSurvival(QWidget, Styles):
 
@@ -124,7 +125,12 @@ class ConfigSurvival(QWidget, Styles):
             self.cbs[i].addItems(self.cb_options[i])
             choice_layout.addLayout(hbox)
 
-
+        for cm in list(colormaps):
+            try:
+                self.cbs[-1].addColormap(cm)
+            except:
+                pass
+
         main_layout.addLayout(choice_layout)
 
         self.cbs[0].setCurrentIndex(0)