small-fish-gui 2.0.2__py3-none-any.whl → 2.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- small_fish_gui/__init__.py +2 -2
- small_fish_gui/batch/integrity.py +2 -2
- small_fish_gui/batch/pipeline.py +46 -11
- small_fish_gui/batch/prompt.py +102 -41
- small_fish_gui/batch/update.py +26 -13
- small_fish_gui/batch/utils.py +1 -1
- small_fish_gui/gui/__init__.py +1 -0
- small_fish_gui/gui/_napari_widgets.py +418 -6
- small_fish_gui/gui/layout.py +332 -112
- small_fish_gui/gui/napari_visualiser.py +107 -22
- small_fish_gui/gui/prompts.py +161 -48
- small_fish_gui/gui/testing.ipynb +231 -24
- small_fish_gui/gui/tooltips.py +7 -1
- small_fish_gui/hints.py +23 -7
- small_fish_gui/interface/__init__.py +7 -1
- small_fish_gui/interface/default_settings.py +118 -0
- small_fish_gui/interface/image.py +43 -11
- small_fish_gui/interface/settings.json +50 -0
- small_fish_gui/interface/testing.ipynb +4354 -0
- small_fish_gui/interface/user_settings.py +96 -0
- small_fish_gui/main_menu.py +13 -1
- small_fish_gui/pipeline/{_signaltonoise.py → _bigfish_wrapers.py} +59 -7
- small_fish_gui/pipeline/_colocalisation.py +23 -24
- small_fish_gui/pipeline/_preprocess.py +46 -32
- small_fish_gui/pipeline/actions.py +48 -5
- small_fish_gui/pipeline/detection.py +71 -141
- small_fish_gui/pipeline/segmentation.py +360 -268
- small_fish_gui/pipeline/spots.py +3 -3
- small_fish_gui/pipeline/utils.py +5 -1
- small_fish_gui/README.md → small_fish_gui-2.0.3.dist-info/METADATA +35 -0
- small_fish_gui-2.0.3.dist-info/RECORD +46 -0
- {small_fish_gui-2.0.2.dist-info → small_fish_gui-2.0.3.dist-info}/WHEEL +1 -1
- small_fish_gui/.github/workflows/python-publish.yml +0 -39
- small_fish_gui/LICENSE +0 -24
- small_fish_gui/batch/values.txt +0 -65
- small_fish_gui/default_values.py +0 -51
- small_fish_gui/gui/screenshot/general_help_screenshot.png +0 -0
- small_fish_gui/gui/screenshot/mapping_help_screenshot.png +0 -0
- small_fish_gui/gui/screenshot/segmentation_help_screenshot.png +0 -0
- small_fish_gui/illustrations/DetectionVitrine_filtre.png +0 -0
- small_fish_gui/illustrations/DetectionVitrine_signal.png +0 -0
- small_fish_gui/illustrations/FocciVitrine.png +0 -0
- small_fish_gui/illustrations/FocciVitrine_no_spots.png +0 -0
- small_fish_gui/illustrations/Segmentation2D.png +0 -0
- small_fish_gui/illustrations/Segmentation2D_with_labels.png +0 -0
- small_fish_gui/logo.png +0 -0
- small_fish_gui/pipeline/testing.ipynb +0 -3636
- small_fish_gui/requirements.txt +0 -19
- small_fish_gui-2.0.2.dist-info/METADATA +0 -75
- small_fish_gui-2.0.2.dist-info/RECORD +0 -59
- {small_fish_gui-2.0.2.dist-info → small_fish_gui-2.0.3.dist-info}/licenses/LICENSE +0 -0

small_fish_gui/interface/user_settings.py ADDED

```diff
@@ -0,0 +1,96 @@
+"""
+Submodule to handle Small Fish settings.
+"""
+
+import os
+import json
+from .default_settings import get_default_settings
+from pydantic import BaseModel, ValidationError
+from typing import Tuple, Optional
+
+class SettingsDict(BaseModel) :
+    working_directory : str
+    do_background_removal : bool
+    background_channel : int
+    multichannel_stack : bool
+    stack_3D : bool
+    detection_channel : int
+    nucleus_channel : int
+    anisotropy : float
+    flow_threshold : float
+    cellprob_threshold : float
+    cytoplasm_diameter : int
+    cytoplasm_min_size : int
+    cytoplasm_max_proj : bool
+    cytoplasm_mean_proj : bool
+    cytoplasm_select_slice : bool
+    cytoplasm_selected_slice : int
+    nucleus_diameter : int
+    cytoplasm_model : str
+    nucleus_model : str
+    nucleus_min_size : int
+    nucleus_max_proj : bool
+    nucleus_mean_proj : bool
+    nucleus_select_slice : bool
+    nucleus_selected_slice : int
+    show_segmentation : bool
+    segment_only_nuclei : bool
+    do_3D_segmentation : bool
+    save_segmentation_visuals : bool
+    threshold : Optional[int]
+    threshold_penalty : float
+    do_dense_regions_deconvolution : bool
+    do_cluster : bool
+    show_napari_corrector : bool
+    interactive_threshold_selector : bool
+    alpha : float
+    beta : float
+    gamma : float
+    cluster_size : int
+    min_spot : int
+    coloc_range : int
+    do_csv : bool
+    do_excel : bool
+    spot_extraction_folder : str
+    voxel_size : tuple
+
+
+
+def get_settings() -> SettingsDict :
+
+    setting_path = get_settings_path()
+
+    if os.path.isfile(setting_path) :
+        return _load_settings()
+    else :
+        settings = _init_settings()
+        write_settings(settings)
+        return settings
+
+def _load_settings() :
+    settings_path = get_settings_path()
+    with open(settings_path, "r") as f:
+        settings = json.load(f)
+
+    try : settings = SettingsDict(**settings)
+
+    except ValidationError as e :
+        print(f"Incorrect settings, using default settings \n{e}")
+        settings = _init_settings()
+
+    return settings
+
+def _init_settings() :
+    default_settings = get_default_settings()
+    return SettingsDict(**default_settings)
+
+def get_settings_path() :
+    return os.path.join(os.path.dirname(__file__) , "settings.json")
+
+def write_settings(settings : SettingsDict) :
+    if not isinstance(settings, SettingsDict) :
+        raise TypeError("Expected SettingsDict type, got {}".format(type(settings)))
+    else :
+        settings_path = get_settings_path()
+        with open(settings_path, mode="w") as f:
+            json.dump(settings.dict(), f, indent=4)
```

small_fish_gui/main_menu.py CHANGED

```diff
@@ -13,14 +13,17 @@ from .pipeline.actions import compute_colocalisation
 from .pipeline.actions import delete_acquisitions, rename_acquisitions
 from .pipeline.actions import save_segmentation, load_segmentation, segment_cells
 from .pipeline.actions import open_wiki
-
+from .pipeline.actions import open_settings
 from .pipeline._preprocess import clean_unused_parameters_cache
+
+from .interface import get_settings
 from .batch import batch_promp
 from .gui import hub_prompt, prompt_restore_main_menu, default_theme
 from .hints import pipeline_parameters
 
 #'Global' parameters
 user_parameters = pipeline_parameters({'segmentation_done' : False}) #TypedDict
+user_parameters.update(get_settings())
 acquisition_id = -1
 result_df = pd.DataFrame(columns=['acquisition_id', 'name'])
 cell_result_df = pd.DataFrame(columns=['acquisition_id'])
@@ -116,6 +119,15 @@ while True : #Break this loop to close small_fish
     elif event == "wiki" :
         open_wiki()
 
+    elif event == "settings" :
+        open_settings()
+        user_parameters.update(get_settings())
+
+    elif event == sg.WINDOW_CLOSE_ATTEMPTED_EVENT :
+        answ = sg.popup_ok_cancel("Do you want to close Small Fish ?")
+        if answ == "OK" : break
+        else : continue
+
     else :
         break
 
```
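The one-line change `user_parameters.update(get_settings())` works because a pydantic BaseModel iterates as (field, value) pairs, which `dict.update()` accepts directly. A small sketch of that merge, with a hypothetical Prefs model standing in for SettingsDict:

```python
# Sketch of why dict.update() accepts a pydantic model: iterating a
# BaseModel yields (field, value) tuples. Prefs and the parameter dict
# below are illustrative, not the package's objects.
from pydantic import BaseModel


class Prefs(BaseModel):  # hypothetical stand-in for SettingsDict
    do_cluster: bool = True
    cluster_size: int = 400


user_parameters = {"segmentation_done": False}
user_parameters.update(Prefs())  # merges {'do_cluster': True, 'cluster_size': 400}
print(user_parameters)
```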
small_fish_gui/pipeline/_signaltonoise.py → small_fish_gui/pipeline/_bigfish_wrapers.py RENAMED

```diff
@@ -1,14 +1,23 @@
 """
-
+Wrappers from BigFish code.
 """
 
-
-# ### SNR ###
-import bigfish.stack as stack
 import numpy as np
-
+import bigfish.stack as stack
+import bigfish.detection as detection
+from bigfish.detection.utils import (
+    get_object_radius_pixel,
+    get_spot_volume,
+    get_spot_surface,
+    get_object_radius_pixel
+)
 
-def compute_snr_spots(
+def compute_snr_spots(
+    image : np.ndarray,
+    spots : np.ndarray,
+    voxel_size : tuple[int,int,int],
+    spot_radius : int
+):
     """
     Modified version of bigfish.detection.utils compute_snr_spots :
     # Author: Arthur Imbert <arthur.imbert.pro@gmail.com>
@@ -216,4 +225,47 @@ def compute_snr_spots(image, spots, voxel_size, spot_radius):
         'cell_stdbackground_std' : np.std(std_background_list)
     }
 
-    return res
+    return res
+
+def _apply_log_filter(
+    image: np.ndarray,
+    voxel_size : tuple,
+    spot_radius : tuple,
+    log_kernel_size : tuple[int] | int,
+
+) :
+    """
+    Apply spot detection steps until local maxima step (just before final threshold).
+    Return filtered image.
+    """
+
+    ndim = image.ndim
+
+    if type(log_kernel_size) == type(None) :
+        log_kernel_size = get_object_radius_pixel(
+            voxel_size_nm=voxel_size,
+            object_radius_nm=spot_radius,
+            ndim=ndim)
+
+
+    image_filtered = stack.log_filter(image, log_kernel_size)
+
+    return image_filtered
+
+def _local_maxima_mask(
+    image_filtered: np.ndarray,
+    voxel_size : tuple,
+    spot_radius : tuple,
+    minimum_distance : int,
+) :
+
+    ndim = image_filtered.ndim
+
+    if type(minimum_distance) == type(None) :
+        minimum_distance = get_object_radius_pixel(
+            voxel_size_nm=voxel_size,
+            object_radius_nm=spot_radius,
+            ndim=ndim)
+    mask_local_max = detection.local_maximum_detection(image_filtered, minimum_distance)
+
+    return mask_local_max.astype(bool)
```
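The two new private helpers split spot detection into a LoG-filtering stage and a local-maximum stage, stopping just before the final intensity threshold. The sketch below illustrates that pipeline on synthetic data using scipy.ndimage stand-ins rather than the bigfish calls themselves; the sigma, filter size, and percentile threshold are arbitrary illustrative values.

```python
# Illustration (not the bigfish API) of the idea behind the helpers above:
# LoG filtering, a local-maximum mask, then a threshold to get candidate
# spot coordinates.
import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
image = rng.poisson(10, size=(32, 64, 64)).astype(np.float64)
image[16, 32, 32] += 200  # one bright synthetic spot

# LoG filter (the package uses stack.log_filter; -gaussian_laplace is similar in spirit)
filtered = -ndi.gaussian_laplace(image, sigma=(1.0, 1.5, 1.5))

# Local-maximum mask (the package uses detection.local_maximum_detection)
local_max = filtered == ndi.maximum_filter(filtered, size=3)

# Final thresholding step that the helpers deliberately stop short of
threshold = np.percentile(filtered, 99.9)
spots = np.argwhere(local_max & (filtered > threshold))
print(spots[:5])
```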
small_fish_gui/pipeline/_colocalisation.py CHANGED

```diff
@@ -1,5 +1,6 @@
 from ._custom_errors import MissMatchError
 from ..gui import coloc_prompt, add_default_loading
+from ..interface import get_settings
 
 import os
 import numpy as np
@@ -136,7 +137,7 @@ def spots_colocalisation(
     voxel_size : (z,y,x) tuple
     """
 
-    if len(spot_list1) == 0 or len(spot_list2) == 0 : return np.
+    if len(spot_list1) == 0 or len(spot_list2) == 0 : return np.nan
     if len(spot_list1[0]) != len(spot_list2[0]) :
         raise MissMatchError("dimensionalities of spots 1 and spots 2 don't match.")
 
@@ -170,10 +171,15 @@ def initiate_colocalisation(
 
     result_tables = result_tables.set_index('acquisition_id', drop=False)
     available_spots = dict(zip(result_tables['acquisition_id'].astype(str).str.cat(result_tables['name'],sep='-'), result_tables.index))
+    default_values = dict(get_settings())
 
     while True :
        try :
-            colocalisation_distance, voxel_size, spots1_key, spots2_key = coloc_prompt(
+            colocalisation_distance, voxel_size, spots1_key, spots2_key, values = coloc_prompt(
+                list(available_spots.keys()),
+                **default_values
+            )
+            default_values.update(values)
             if colocalisation_distance is None :
                 return None,None, None,None
             colocalisation_distance = int(colocalisation_distance)
@@ -194,15 +200,8 @@ def initiate_colocalisation(
                 raise ValueError("Incorrect value for spots1")
 
         except ValueError as e :
-
-            if str(e) == "Incorrect value for spots1" :
-                sg.popup(str(e))
-
-            elif str(e) == "Incorrect value for spots2" :
-                sg.popup(str(e))
+            sg.popup(str(e))
 
-            else :
-                sg.popup("Incorrect colocalisation distance")
         else :
             break
     return colocalisation_distance, voxel_size, spots1_key, spots2_key
@@ -250,8 +249,8 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
         fraction_spots2_coloc_spots1 = spots_colocalisation(spot_list1=spots2, spot_list2=spots1, distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
     except MissMatchError as e :
         sg.popup(str(e))
-        fraction_spots1_coloc_spots2 = np.
-        fraction_spots2_coloc_spots1 = np.
+        fraction_spots1_coloc_spots2 = np.nan
+        fraction_spots2_coloc_spots1 = np.nan
 
     if 'clusters' in acquisition1.columns :
         try :
@@ -259,12 +258,12 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
             fraction_spots2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2, spot_list2=spots1[clusters_id_1 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
         except MissMatchError as e :
             sg.popup(str(e))
-            fraction_spots2_coloc_cluster1 = np.
+            fraction_spots2_coloc_cluster1 = np.nan
         except TypeError : # clusters not computed
-            fraction_spots2_coloc_cluster1 = np.
+            fraction_spots2_coloc_cluster1 = np.nan
 
 
-    else : fraction_spots2_coloc_cluster1 = np.
+    else : fraction_spots2_coloc_cluster1 = np.nan
 
     if 'clusters' in acquisition2.columns :
         try :
@@ -272,12 +271,12 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
             fraction_spots1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1, spot_list2=spots2[clusters_id_2 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
         except MissMatchError as e :# clusters not computed
             sg.popup(str(e))
-            fraction_spots1_coloc_cluster2 = np.
+            fraction_spots1_coloc_cluster2 = np.nan
         except TypeError :
-            fraction_spots1_coloc_cluster2 = np.
+            fraction_spots1_coloc_cluster2 = np.nan
 
 
-    else : fraction_spots1_coloc_cluster2 = np.
+    else : fraction_spots1_coloc_cluster2 = np.nan
 
     if 'clusters' in acquisition2.columns and 'clusters' in acquisition1.columns :
         try :
@@ -287,15 +286,15 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
             fraction_cluster2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2[clusters_id_2 != -1], spot_list2=spots1[clusters_id_1 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / total_clustered_spots2
         except MissMatchError as e :# clusters not computed
             sg.popup(str(e))
-            fraction_cluster1_coloc_cluster2 = np.
-            fraction_cluster2_coloc_cluster1 = np.
+            fraction_cluster1_coloc_cluster2 = np.nan
+            fraction_cluster2_coloc_cluster1 = np.nan
         except TypeError :
-            fraction_cluster1_coloc_cluster2 = np.
-            fraction_cluster2_coloc_cluster1 = np.
+            fraction_cluster1_coloc_cluster2 = np.nan
+            fraction_cluster2_coloc_cluster1 = np.nan
 
     else :
-        fraction_cluster1_coloc_cluster2 = np.
-        fraction_cluster2_coloc_cluster1 = np.
+        fraction_cluster1_coloc_cluster2 = np.nan
+        fraction_cluster2_coloc_cluster1 = np.nan
 
 
     coloc_df = pd.DataFrame({
```
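The truncated `np.` assignments of 2.0.2 become explicit `np.nan` fallbacks here, so empty or mismatched spot lists propagate as missing values instead of failing. The sketch below shows one way such a colocalisation count could be computed with a KD-tree, including the same empty-input guard; the function name and numbers are illustrative and are not the package's implementation.

```python
# Sketch: count spots of list 1 that have at least one spot of list 2
# within `distance` nm, after scaling coordinates by the (z, y, x) voxel
# size. Returns np.nan for empty inputs, mirroring the 2.0.3 guard.
import numpy as np
from scipy.spatial import cKDTree


def count_colocalised(spots1, spots2, distance, voxel_size):
    if len(spots1) == 0 or len(spots2) == 0:
        return np.nan  # same guard as the fixed code
    scale = np.asarray(voxel_size, dtype=float)
    tree = cKDTree(np.asarray(spots2) * scale)
    neighbours = tree.query_ball_point(np.asarray(spots1) * scale, r=distance)
    return sum(1 for n in neighbours if n)


spots1 = np.array([[10, 50, 50], [12, 80, 80]])
spots2 = np.array([[10, 51, 50]])
print(count_colocalised(spots1, spots2, distance=300, voxel_size=(300, 100, 100)))  # 1
print(count_colocalised(np.empty((0, 3)), spots2, 300, (300, 100, 100)))            # nan
```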
small_fish_gui/pipeline/_preprocess.py CHANGED

```diff
@@ -3,8 +3,10 @@ import os
 import FreeSimpleGUI as sg
 from ..gui import _error_popup, _warning_popup, parameters_layout, add_header
 from ..gui.prompts import input_image_prompt, prompt
+from ..gui.layout import _ask_channel_map_layout
+from ..interface import get_settings
+from ..hints import pipeline_parameters
 
-import small_fish_gui.default_values as default
 
 class ParameterInputError(Exception) :
     """
@@ -33,10 +35,9 @@ def prepare_image_detection(map_, user_parameters) :
     assert len(image.shape) != 5 , "Time stack not supported, should never be True"
 
     if user_parameters['is_multichannel'] :
-        channel_to_compute = user_parameters['channel_to_compute']
+        channel_to_compute = int(user_parameters['channel_to_compute'])
         other_image = image.copy()
         other_image = np.delete(other_image, channel_to_compute, axis=0)
-        other_image = [layer for layer in other_image]
         image: np.ndarray = image[channel_to_compute]
 
     else :
@@ -66,20 +67,20 @@ def map_channels(user_parameters) :
     image = user_parameters['image']
     is_3D_stack = user_parameters['is_3D_stack']
     is_time_stack = False
-
+    is_multichannel = user_parameters['is_multichannel']
 
     try :
-        map_ = _auto_map_channels(is_3D_stack, is_time_stack,
+        map_ = _auto_map_channels(is_3D_stack, is_time_stack, is_multichannel, image=image)
     except MappingError as e :
         sg.popup("Automatic dimension mapping went wrong. Please indicate dimensions positions in the array.")
-        map_ = _ask_channel_map(image.shape, is_3D_stack, is_time_stack,
+        map_ = _ask_channel_map(image.shape, is_3D_stack, is_time_stack, is_multichannel, preset_map= e.get_map())
 
     else :
-        map_ = _show_mapping(image.shape, map_, is_3D_stack, is_time_stack,
+        map_ = _show_mapping(image.shape, map_, is_3D_stack, is_time_stack, is_multichannel,)
 
     return map_
 
-def _auto_map_channels(is_3D_stack, is_time_stack,
+def _auto_map_channels(is_3D_stack, is_time_stack, is_multichannel, image: np.ndarray=None, shape=None) :
     if type(shape) == type(None) :
         shape = image.shape
     reducing_list = list(shape)
@@ -100,7 +101,7 @@ def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarr
     reducing_list.remove(x_val)
 
     #smaller value set to c
-    if
+    if is_multichannel :
         c_val = min(reducing_list)
         c_idx = shape.index(c_val)
         map_['c'] = c_idx
@@ -124,7 +125,7 @@ def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarr
 
     return map_
 
-def _ask_channel_map(shape, is_3D_stack, is_time_stack,
+def _ask_channel_map(shape, is_3D_stack, is_time_stack, is_multichannel, preset_map: dict= {}) :
     while True :
         relaunch = False
         save_preset = preset_map.copy()
@@ -135,13 +136,13 @@ def _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map
         t = preset_map.setdefault('t',0)
 
 
-        layout =
-
-
-
-
-
-
+        layout = _ask_channel_map_layout(
+            is_3D_stack=is_3D_stack,
+            is_time_stack=False,
+            is_multichannel=is_multichannel,
+            shape=shape,
+            preset_map=preset_map
+        )
 
         event, preset_map = prompt(layout, add_scrollbar=False)
         if event == 'Cancel' : return save_preset
@@ -160,7 +161,7 @@ def _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map
 
     return preset_map
 
-def _show_mapping(shape, map_, is_3D_stack, is_time_stack,
+def _show_mapping(shape, map_, is_3D_stack, is_time_stack, is_multichannel) :
     while True :
         layout = [
             [sg.Text("Image shape : {0}".format(shape))],
@@ -176,7 +177,7 @@ def _show_mapping(shape, map_, is_3D_stack, is_time_stack, multichannel) :
         if event == 'Ok' :
             return map_
         elif event == 'Change mapping':
-            map_ = _ask_channel_map(shape, is_3D_stack, is_time_stack,
+            map_ = _ask_channel_map(shape, is_3D_stack, is_time_stack, is_multichannel, preset_map=map_)
         elif event == 'Cancel' :
             return None
         else : raise AssertionError('Unforseen event')
@@ -208,7 +209,7 @@ def convert_parameters_types(values:dict) :
 
     #Parameters
     int_list = ['threshold', 'channel_to_compute', 'channel_to_compute', 'min_number_of_spots', 'cluster_size','nucleus channel signal']
-    float_list = ['alpha', 'beta', 'gamma', '
+    float_list = ['alpha', 'beta', 'gamma', 'threshold_penalty']
 
     for parameter in int_list :
         try :
@@ -230,7 +231,7 @@ def check_integrity(
     values: dict,
     do_dense_region_deconvolution,
     do_clustering,
-
+    is_multichannel,
     segmentation_done,
     map_,
     shape
@@ -241,8 +242,9 @@ def check_integrity(
 
     #voxel_size
     if type(values['voxel_size']) == type(None) :
-        print(values['voxel_size'])
         raise ParameterInputError('Incorrect voxel size parameter.')
+    elif (np.array(values['voxel_size']) < 1).any() :
+        raise ParameterInputError('Voxel size can be < 1.')
 
     #detection integrity :
     if not isinstance(values['spot_size'], (tuple, list)) and not(isinstance(values['minimum_distance'], (tuple, list)) and isinstance(values['log_kernel_size'], (tuple, list))) :
@@ -266,7 +268,7 @@ def check_integrity(
         raise ParameterInputError("Incorrect cluster size parameter.")
 
     #channel
-    if
+    if is_multichannel :
         ch_len = shape[int(map_['c'])]
 
         if type(segmentation_done) == type(None) :
@@ -317,7 +319,7 @@ def _check_segmentation_parameters(
 
     available_channels = list(range(len(shape)))
     do_only_nuc = user_parameters['segment_only_nuclei']
-
+    cytoplasm_model_name = user_parameters['cytoplasm_model_name']
     cyto_size = user_parameters['cytoplasm_diameter']
     cytoplasm_channel = user_parameters['cytoplasm_channel']
     nucleus_model_name = user_parameters['nucleus_model_name']
@@ -325,7 +327,7 @@ def _check_segmentation_parameters(
     nucleus_channel = user_parameters['nucleus_channel']
 
 
-    if type(
+    if type(cytoplasm_model_name) != str and not do_only_nuc:
         raise ParameterInputError('Invalid cytoplasm model name.')
     if cytoplasm_channel not in available_channels and not do_only_nuc and is_multichannel:
         raise ParameterInputError('For given input image please select channel in {0}\ncytoplasm_channel : {1}'.format(available_channels, cytoplasm_channel))
@@ -355,22 +357,33 @@ def clean_unused_parameters_cache(user_parameters: dict) :
 
     return user_parameters
 
-def ask_input_parameters(ask_for_segmentation=True) :
+def ask_input_parameters(image_input_values : pipeline_parameters, ask_for_segmentation=True) :
     """
     Prompt user with interface allowing parameters setting for bigFish detection / deconvolution.
     """
 
     values = {}
-
+    default = get_settings()
     while True :
-
-
-
-
-
+        filename_preset = image_input_values.setdefault("image_path", default.working_directory)
+
+        if os.path.isfile(filename_preset) :
+            filename_preset = os.path.dirname(filename_preset)
+        elif os.path.isdir(filename_preset) :
+            pass
+        else :
+            filename_preset = default.working_directory
+
+        is_3D_preset = image_input_values.setdefault('is_3D_stack', default.stack_3D)
+        is_multichannel_preset = image_input_values.setdefault('is_multichannel', default.multichannel_stack)
+        denseregion_preset = image_input_values.setdefault('do_dense_regions_deconvolution', default.do_dense_regions_deconvolution)
+        do_clustering_preset = image_input_values.setdefault('do_cluster_computation', default.do_cluster)
+        do_napari_preset = image_input_values.setdefault('show_napari_corrector', default.show_napari_corrector)
+        do_background_removal_preset = image_input_values.setdefault('do_background_removal_preset', default.do_background_removal)
 
         if ask_for_segmentation :
             image_input_values = input_image_prompt(
+                filename_preset= filename_preset,
                 is_3D_stack_preset=is_3D_preset,
                 multichannel_preset=is_multichannel_preset,
                 do_dense_regions_deconvolution_preset=None,
@@ -379,6 +392,7 @@ def ask_input_parameters(ask_for_segmentation=True) :
             )
         else :
             image_input_values = input_image_prompt(
+                filename_preset= filename_preset,
                 is_3D_stack_preset=is_3D_preset,
                 multichannel_preset=is_multichannel_preset,
                 do_dense_regions_deconvolution_preset=denseregion_preset,
```
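`_auto_map_channels` now receives `is_multichannel` explicitly and keeps its size-based heuristic: the largest axes are treated as the spatial y/x axes, the smallest remaining axis becomes the channel axis when the stack is multichannel, and what is left is z. An illustrative reimplementation of that heuristic (not the package's exact code):

```python
# Sketch of a size-based dimension-mapping heuristic: axis indices are
# ranked by length, the two largest become x/y, the smallest remaining
# axis becomes c (channels) if requested, and the leftover axis becomes z.
def auto_map(shape, is_3D_stack, is_multichannel):
    axes_by_size = sorted(range(len(shape)), key=lambda i: shape[i])  # smallest first
    map_ = {"x": axes_by_size[-1], "y": axes_by_size[-2]}  # two largest axes -> spatial x/y
    rest = axes_by_size[:-2]
    if is_multichannel:
        map_["c"] = rest.pop(0)  # smallest remaining axis -> channels
    if is_3D_stack:
        map_["z"] = rest.pop(0)
    return map_


print(auto_map((4, 30, 512, 512), is_3D_stack=True, is_multichannel=True))
# {'x': 3, 'y': 2, 'c': 0, 'z': 1}
```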
small_fish_gui/pipeline/actions.py CHANGED

```diff
@@ -2,12 +2,16 @@
 This submodule groups all the possible actions of the user in the main windows. It is the start of each action the user can do.
 """
 
+from torch import layout
 from ..gui.prompts import output_image_prompt, prompt_save_segmentation, prompt_load_segmentation
 from ..gui.prompts import ask_detection_confirmation, ask_cancel_detection, ask_confirmation
 from ..gui.prompts import rename_prompt
+from ..gui.prompts import prompt
+from ..gui.layout import settings_layout
 
 from ..interface.inoutput import write_results, write_list_of_results
 from ..interface.inoutput import input_segmentation, output_segmentation
+from ..interface import get_settings, SettingsDict, write_settings
 
 from ._preprocess import map_channels
 from ._preprocess import prepare_image_detection
@@ -30,6 +34,7 @@ import pandas as pd
 import FreeSimpleGUI as sg
 import numpy as np
 import webbrowser
+from pydantic import ValidationError
 
 def open_wiki() :
     webbrowser.open_new_tab(__wiki__)
@@ -60,7 +65,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
     new_cell_results_df = pd.DataFrame()
 
     #Ask for image parameters
-    new_parameters = ask_input_parameters(ask_for_segmentation= False) #The image is open and stored inside user_parameters
+    new_parameters = ask_input_parameters(user_parameters, ask_for_segmentation= False) #The image is open and stored inside user_parameters
     if type(new_parameters) == type(None) : #if user clicks 'Cancel'
         return new_results_df, new_cell_results_df, acquisition_id, user_parameters
     else :
@@ -93,7 +98,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
     nucleus_signal = get_nucleus_signal(image, other_image, user_parameters)
 
     try : # Catch error raised if user enter a spot size too small compare to voxel size
-        user_parameters, frame_result, spots, clusters, spots_cluster_id = launch_detection(
+        user_parameters, frame_result, spots, clusters, spots_cluster_id, image = launch_detection(
             image,
             other_image,
             user_parameters,
@@ -252,7 +257,7 @@ def save_results(
     path = dic['folder']
     filename = dic['filename']
     do_excel = dic['Excel']
-    do_feather =
+    do_feather = False
     do_csv = dic['csv']
 
     if 'rna_coords' in cell_result_df.columns : cell_result_df = cell_result_df.drop(columns='rna_coords')
@@ -270,7 +275,7 @@ def save_results(
     path = dic['folder']
     filename = dic['filename']
     do_excel = dic['Excel']
-    do_feather =
+    do_feather = False
     do_csv = dic['csv']
 
     if 'rna_coords' in cell_result_df.columns : cell_result_df = cell_result_df.drop(columns='rna_coords')
@@ -456,4 +461,42 @@ def rename_acquisitions(
         df.columns = pd.MultiIndex.from_tuples(new_columns)
         cell_coloc_df[key] = df
 
-    return result_df, cell_result_df, global_coloc_df, cell_coloc_df
+    return result_df, cell_result_df, global_coloc_df, cell_coloc_df
+
+
+def open_settings() :
+    settings = get_settings()
+
+    while True :
+        layout = settings_layout(settings)
+        event, values = prompt(layout)
+
+        if event == "Cancel" :
+            return False
+        else :
+            try :
+                if values['threshold'] == "" : values['threshold'] = None
+                if values["voxel_size_z"] == "z" : values["voxel_size_z"] = 1
+                if values["voxel_size_y"] == "y" : values["voxel_size_y"] = 1
+                if values["voxel_size_x"] == "x" : values["voxel_size_x"] = 1
+                values["voxel_size"] = (int(values['voxel_size_z']), int(values['voxel_size_y']), int(values['voxel_size_x']))
+                new_settings = SettingsDict(**values)
+
+            except ValidationError as e :
+
+                layout = []
+                for error in e.errors() :
+                    layout.append([sg.Text(f"{error['loc']} : {error['msg']}")])
+                layout.append([sg.Button("Close")])
+                prompt(layout, add_ok_cancel=False)
+
+            except ValueError as e :
+                if not "invalid literal" in str(e) :
+                    raise e
+                sg.popup("Incorrect voxel size parameters")
+
+            else :
+                break
+
+    write_settings(new_settings)
+    return True
```