small-fish-gui 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,266 @@
1
+ from ._custom_errors import MissMatchError
2
+ from ..gui import coloc_prompt, add_default_loading
3
+
4
+ import numpy as np
5
+ import pandas as pd
6
+ import PySimpleGUI as sg
7
+ from scipy.ndimage import distance_transform_edt
8
+ from scipy.signal import fftconvolve
9
+
10
def reconstruct_boolean_signal(image_shape, spot_list: list):
    """
    Build a boolean mask of shape ``image_shape`` that is True at every spot position.

    Spot coordinates are (Z, Y, X) tuples for 3D signals or (Y, X) for 2D ones.
    An empty spot list yields an all-False mask.
    """
    signal = np.zeros(image_shape, dtype=bool)
    if len(spot_list) == 0:
        return signal

    if len(spot_list[0]) == 3:
        z_coords, y_coords, x_coords = zip(*spot_list)
        signal[z_coords, y_coords, x_coords] = True
    else:
        y_coords, x_coords = zip(*spot_list)
        signal[y_coords, x_coords] = True

    return signal
25
+
26
def nanometer_to_pixel(value, scale):
    """
    Convert physical distance(s) in nanometers to pixel units.

    Scalars are broadcast: a scalar ``scale`` becomes a 1-element list and a
    scalar ``value`` is repeated to match ``scale``'s length.

    Raises ValueError when the two sequences end up with different lengths.
    """
    if isinstance(scale, (float, int)):
        scale = [scale]
    if isinstance(value, (float, int)):
        value = [value] * len(scale)
    if len(value) != len(scale):
        raise ValueError("value and scale must have the same dimensionality")

    return list(np.asarray(value) / np.asarray(scale))
32
+
33
def _create_counting_kernel(radius_nm, voxel_size):
    """
    Build an integer kernel whose non-zero entries mark every voxel lying within
    ``radius_nm`` (physical distance) of the kernel centre.
    """
    pixel_radius = int(max(nanometer_to_pixel(radius_nm, voxel_size)))
    # Odd side length so the centre voxel is uniquely defined.
    side = 2 * pixel_radius + 1
    kernel = np.ones(shape=[side] * len(voxel_size))
    if len(voxel_size) == 3:
        kernel[pixel_radius, pixel_radius, pixel_radius] = 0
    else:
        kernel[pixel_radius, pixel_radius] = 0

    # distance_transform_edt assigns each voxel its physical distance to the
    # centre (the single zero entry), using voxel_size as the sampling grid.
    kernel = distance_transform_edt(kernel, sampling=voxel_size) <= radius_nm

    return kernel.astype(int)
45
+
46
def _spot_count_map(spots_array, radius_px, voxel_size):
    """
    Create a map where each pixel value corresponds to the number of spots closer than radius to the position.

    Parameters
    ----------
    spots_array : np.ndarray
        Integer array where each position holds the number of spots located there.
    radius_px : int, float
        Radius in nanometers, forwarded to `_create_counting_kernel`.
    voxel_size : tuple
        Physical voxel size, same dimensionality as ``spots_array``.

    Returns
    -------
    np.ndarray of int
    """
    kernel = _create_counting_kernel(radius_px, voxel_size)
    # Renamed from `map`, which shadowed the builtin.
    # fftconvolve returns floats; round before casting to avoid truncation errors.
    count_map = fftconvolve(spots_array, kernel, mode='same')

    return np.round(count_map).astype(int)
55
+
56
+ def _reconstruct_spot_signal(image_shape, spot_list: list, dim=3):
57
+ """
58
+ Create a map where each pixel value correspond to the number of spots located in this position.
59
+ """
60
+ signal = np.zeros(image_shape, dtype= int)
61
+ unique_list, counts = np.unique(spot_list, return_counts= True, axis=0)
62
+ if dim == 3 :
63
+ Z, Y, X = list(zip(*unique_list))
64
+ signal[Z,Y,X] = counts
65
+ elif dim == 2 :
66
+ Y, X = list(zip(*unique_list))
67
+ signal[Y,X] = counts
68
+ else :
69
+ raise ValueError("Wrong dim passed should be 2 or 3, it is {0}".format(dim))
70
+
71
+ return signal
72
+
73
def spots_multicolocalisation(spots_list, anchor_list, radius_nm, image_shape, voxel_size):
    """
    For each spot of ``spots_list``, count how many anchors of ``anchor_list`` lie
    within ``radius_nm`` of it; a spot is counted once per close-enough anchor.
    The radius in nanometers is converted to pixels with ``voxel_size`` and rounded
    to the nearest integer.

    Parameters
    ----------
    spots_list : list
    anchor_list : list
    radius_nm : int, float
    image_shape : tuple (Z, Y, X)
    voxel_size : tuple (Z, Y, X)

    Returns
    -------
    list of int
        One neighbour count per spot of ``spots_list``; 0 when either list is empty.
    """
    if len(spots_list) == 0 or len(anchor_list) == 0:
        return 0
    if len(voxel_size) != len(spots_list[0]):
        raise ValueError("Dimensions missmatched; voxel_size : {0} spots : {1}".format(len(voxel_size), len(spots_list[0])))

    dim = len(voxel_size)

    # Convolve the anchor density with a counting kernel, then read the map at
    # each spot position.
    anchor_array = _reconstruct_spot_signal(image_shape=image_shape, spot_list=anchor_list, dim=dim)
    count_map = _spot_count_map(anchor_array, radius_px=radius_nm, voxel_size=voxel_size)

    if dim == 3:
        z_idx, y_idx, x_idx = zip(*spots_list)
        neighbour_counts = list(count_map[z_idx, y_idx, x_idx])
    else:
        y_idx, x_idx = zip(*spots_list)
        neighbour_counts = list(count_map[y_idx, x_idx])

    return neighbour_counts
118
+
119
def spots_colocalisation(image_shape, spot_list1: list, spot_list2: list, distance: float, voxel_size) -> int:
    """
    Return the number of spots from spot_list1 located closer (inclusive) than ``distance``
    to at least one spot of spot_list2.

    Parameters
    ----------
    image_shape : tuple
    spot_list1 : list
    spot_list2 : list
    distance : float
        Distance in nanometers.
    voxel_size : tuple
        (z, y, x) physical voxel size.

    Returns
    -------
    int or float
        Colocalised spot count; ``np.nan`` when either spot list is empty.

    Raises
    ------
    MissMatchError
        When the two spot lists do not share the same dimensionality.
    """

    # np.nan instead of np.NaN : the upper-case alias was removed in NumPy 2.0.
    if len(spot_list1) == 0 or len(spot_list2) == 0:
        return np.nan
    if len(spot_list1[0]) != len(spot_list2[0]):
        raise MissMatchError("dimensionalities of spots 1 and spots 2 don't match.")

    # Keep only the spatial axes matching the voxel size dimensionality.
    if len(voxel_size) == 3:
        image_shape = image_shape[-3:]
    else:
        image_shape = image_shape[-2:]

    signal2 = reconstruct_boolean_signal(image_shape, spot_list2)
    mask = np.logical_not(signal2)
    # Each position gets its physical distance to the nearest spot of spot_list2.
    distance_map = distance_transform_edt(mask, sampling=voxel_size)

    if len(voxel_size) == 3:
        Z, Y, X = zip(*spot_list1)
        count = (distance_map[Z, Y, X] <= distance).sum()
    else:
        Y, X = zip(*spot_list1)
        count = (distance_map[Y, X] <= distance).sum()

    return count
154
+
155
+
156
+
157
+
158
+
159
+
160
def initiate_colocalisation(result_tables):
    """
    Validate the acquisition selection and prompt the user for a colocalisation distance.

    Returns False when the selection is not exactly 2 acquisitions or the prompt
    is cancelled; otherwise returns the distance as an int, re-prompting until
    the input parses.
    """
    if len(result_tables) != 2:
        sg.popup("Please select 2 acquisitions for colocalisation (Ctrl + click in the table)")
        return False

    while True:
        colocalisation_distance = coloc_prompt()
        # == False kept deliberately : the prompt signals cancellation with False.
        if colocalisation_distance == False:
            return False
        try:
            colocalisation_distance = int(colocalisation_distance)
        except Exception:
            sg.popup("Incorrect colocalisation distance")
            continue
        return colocalisation_distance
175
+
176
@add_default_loading
def launch_colocalisation(result_tables, result_dataframe, colocalisation_distance):
    """
    Compute colocalisation statistics between the two selected acquisitions.

    Returns a one-row DataFrame with :

    - acquisition_couple, acquisition_id_1, acquisition_id_2
    - colocalisation_distance
    - spot1_total, spot2_total
    - fraction_spots1_coloc_spots2
    - fraction_spots2_coloc_spots1
    - fraction_spots2_coloc_cluster1
    - fraction_spots1_coloc_cluster2

    Raises
    ------
    MissMatchError
        When the two acquisitions differ in voxel size or shape.
    """

    acquisition1 = result_dataframe.iloc[result_tables[0]]
    acquisition2 = result_dataframe.iloc[result_tables[1]]

    voxel_size1 = acquisition1.at['voxel_size']
    voxel_size2 = acquisition2.at['voxel_size']
    shape1 = acquisition1.at['reordered_shape']
    shape2 = acquisition2.at['reordered_shape']

    if voxel_size1 != voxel_size2:
        raise MissMatchError("voxel size 1 different than voxel size 2")
    voxel_size = voxel_size1

    if shape1 != shape2:
        # Diagnostic output before aborting.
        print(shape1)
        print(shape2)
        raise MissMatchError("shape 1 different than shape 2")
    shape = shape1

    acquisition_couple = (acquisition1.at['acquisition_id'], acquisition2.at['acquisition_id'])

    spots1 = acquisition1['spots']
    spots2 = acquisition2['spots']

    spot1_total = len(spots1)
    spot2_total = len(spots2)

    def _safe_fraction(count, total):
        # Guard against ZeroDivisionError when an acquisition holds no spot.
        return count / total if total > 0 else np.nan

    try:
        fraction_spots1_coloc_spots2 = _safe_fraction(
            spots_colocalisation(image_shape=shape, spot_list1=spots1, spot_list2=spots2, distance=colocalisation_distance, voxel_size=voxel_size),
            spot1_total)
        fraction_spots2_coloc_spots1 = _safe_fraction(
            spots_colocalisation(image_shape=shape, spot_list1=spots2, spot_list2=spots1, distance=colocalisation_distance, voxel_size=voxel_size),
            spot2_total)
    except MissMatchError as e:
        sg.popup(str(e))
        fraction_spots1_coloc_spots2 = np.nan
        fraction_spots2_coloc_spots1 = np.nan

    if 'clusters' in acquisition1.index:
        try:
            # Keep only spatial coordinates of clusters.
            clusters1 = acquisition1['clusters'][:, :len(voxel_size)]
            fraction_spots2_coloc_cluster1 = _safe_fraction(
                spots_colocalisation(image_shape=shape, spot_list1=spots2, spot_list2=clusters1, distance=colocalisation_distance, voxel_size=voxel_size),
                spot2_total)
        except MissMatchError as e:
            sg.popup(str(e))
            fraction_spots2_coloc_cluster1 = np.nan
    else:
        fraction_spots2_coloc_cluster1 = np.nan

    if 'clusters' in acquisition2.index:
        try:
            clusters2 = acquisition2['clusters'][:, :len(voxel_size)]
            fraction_spots1_coloc_cluster2 = _safe_fraction(
                spots_colocalisation(image_shape=shape, spot_list1=spots1, spot_list2=clusters2, distance=colocalisation_distance, voxel_size=voxel_size),
                spot1_total)
        except MissMatchError as e:
            sg.popup(str(e))
            fraction_spots1_coloc_cluster2 = np.nan
    else:
        fraction_spots1_coloc_cluster2 = np.nan

    coloc_df = pd.DataFrame({
        "acquisition_couple": [acquisition_couple],
        "acquisition_id_1": [acquisition_couple[0]],
        "acquisition_id_2": [acquisition_couple[1]],
        "colocalisation_distance": [colocalisation_distance],
        "spot1_total": [spot1_total],
        "spot2_total": [spot2_total],
        'fraction_spots1_coloc_spots2': [fraction_spots1_coloc_spots2],
        'fraction_spots2_coloc_spots1': [fraction_spots2_coloc_spots1],
        'fraction_spots2_coloc_cluster1': [fraction_spots2_coloc_cluster1],
        'fraction_spots1_coloc_cluster2': [fraction_spots1_coloc_cluster2],
    })

    return coloc_df
@@ -0,0 +1,2 @@
1
class MissMatchError(ValueError):
    """
    Raised when two inputs that must agree (dimensionality, voxel size, shape...) do not match.
    """
    pass
@@ -0,0 +1,139 @@
1
+ """
2
+ Contains Napari wrappers to visualise and correct spots/clusters.
3
+ """
4
+
5
+
6
+ import numpy as np
7
+ import scipy.ndimage as ndi
8
+ import napari
9
+
10
+ from napari.utils.events import Event
11
+ from napari.layers import Points
12
+ from bigfish.stack import check_parameter
13
+ from ..utils import compute_anisotropy_coef
14
+ from ._colocalisation import spots_multicolocalisation
15
+
16
class Points_callback:
    """
    Custom class to handle points number evolution during a Napari run.

    Tracks the data of a napari Points layer and hands a fresh ``id`` feature
    to every newly added point.
    """

    def __init__(self, points, next_id) -> None:
        # points : current data of the tracked Points layer
        # next_id : id to assign to the next point the user adds
        self.points = points
        self.next_id = next_id
        self._set_callback()

    def __str__(self) -> str:
        # Fixed : previously read the never-assigned attribute
        # `current_points_number` (AttributeError) and misspelt "currnet_id".
        return 'Points_callback object state :\ncurrent_points_number : {0}\ncurrent_id : {1}'.format(
            len(self.points), self.next_id)

    def get_points(self):
        return self.points

    def get_next_id(self):
        return self.next_id

    def _set_callback(self):
        def callback(event: "Event"):

            old_points = self.get_points()
            new_points: "Points" = event.source.data
            features = event.source.features

            current_point_number = len(old_points)
            next_id = self.get_next_id()
            new_points_number = len(new_points)

            # A point was added : give it the next free id.
            if new_points_number > current_point_number:
                features.at[new_points_number - 1, "id"] = next_id
                self.next_id += 1

            # Preparing next callback with the updated layer state.
            self.points = new_points
            self._set_callback()
        self.callback = callback
55
+
56
def _update_clusters(new_clusters: np.ndarray, spots: np.ndarray, voxel_size, cluster_size, min_spot_number, shape):
    """
    Recount the spots belonging to each cluster after user edition, then drop
    clusters holding fewer than ``min_spot_number`` spots.
    """
    if len(new_clusters) == 0 or len(spots) == 0:
        return new_clusters
    assert len(new_clusters[0]) in (4, 5), "Wrong number of coordinates for clusters should not happen."

    # Update spots clusters : recount spots within cluster_size of each centre.
    dim = len(voxel_size)
    if dim in (2, 3):
        new_clusters[:, -2] = spots_multicolocalisation(
            new_clusters[:, :dim], spots, radius_nm=cluster_size, voxel_size=voxel_size, image_shape=shape)

    # Delete too-small clusters (boolean mask is treated as a deletion mask by np.delete).
    too_small = new_clusters[:, -2] < min_spot_number
    new_clusters = np.delete(new_clusters, too_small, 0)

    return new_clusters
71
+
72
def correct_spots(image, spots, voxel_size=(1, 1, 1), clusters=None, cluster_size=None, min_spot_number=0, cell_label=None, nucleus_label=None, other_images=None):
    """
    Open Napari viewer for user to visualize and correct spots, clusters.

    Parameters
    ----------
    image : np.ndarray
        Monochannel signal shown as the main layer.
    spots : np.ndarray
        Detected spot coordinates.
    voxel_size : tuple
        Physical voxel size; its length sets the dimensionality (2 or 3).
    clusters : np.ndarray, optional
        Coordinates followed by (spot_number, id) columns.
    cluster_size, min_spot_number :
        Forwarded to `_update_clusters` to recount/filter edited clusters.
    cell_label, nucleus_label : np.ndarray, optional
        2D segmentation labels, repeated along z for 3D images.
    other_images : list, optional
        Extra channels shown as additional layers.
        (was a mutable default ``[]``; now None to avoid shared state)

    Returns
    -------
    new_spots, new_clusters
    """
    if other_images is None:
        other_images = []
    check_parameter(image=np.ndarray, voxel_size=(tuple, list))
    dim = len(voxel_size)

    # 2D labels are repeated along z so they overlay every slice of a 3D image.
    if dim == 3 and cell_label is not None:
        cell_label = np.repeat(cell_label[np.newaxis], repeats=len(image), axis=0)
    if dim == 3 and nucleus_label is not None:
        nucleus_label = np.repeat(nucleus_label[np.newaxis], repeats=len(image), axis=0)

    scale = compute_anisotropy_coef(voxel_size)
    try:
        Viewer = napari.Viewer(ndisplay=2, title='Spot correction', axis_labels=['z', 'y', 'x'], show=False)
        Viewer.add_image(image, scale=scale, name="rna signal", blending='additive', colormap='red')
        # 6 colormaps cycled over the extra layers. Fixed precedence bug :
        # `len(other_images)-1 // 7` parsed as `len(other_images) - 0`.
        other_colors = ['green', 'blue', 'gray', 'cyan', 'bop orange', 'bop purple'] * ((len(other_images) // 6) + 1)
        for im, color in zip(other_images, other_colors):
            Viewer.add_image(im, scale=scale, blending='additive', visible=False, colormap=color)
        layer_offset = len(other_images)

        Viewer.add_points(spots, size=5, scale=scale, face_color='green', opacity=1, symbol='ring', name='single spots')  # spots
        if clusters is not None:
            Viewer.add_points(clusters[:, :dim], size=10, scale=scale, face_color='blue', opacity=0.7, symbol='diamond', name='foci', features={"spot_number": clusters[:, dim], "id": clusters[:, dim + 1]}, feature_defaults={"spot_number": 0, "id": -1})  # cluster
        # NOTE(review): cell label is only shown when identical to the nucleus
        # label — this condition looks inverted; confirm intent with the author.
        if cell_label is not None and np.array_equal(nucleus_label, cell_label):
            Viewer.add_labels(cell_label, scale=scale, opacity=0.2, blending='additive')
        if nucleus_label is not None:
            Viewer.add_labels(nucleus_label, scale=scale, opacity=0.2, blending='additive')

        # Prepare cluster update : track foci-layer edits to hand ids to new points.
        if clusters is not None:
            next_cluster_id = clusters[-1, -1] + 1 if len(clusters) > 0 else 1
            # Fixed : was `clusters[:dim]` (first rows) instead of the coordinate columns.
            _callback = Points_callback(points=clusters[:, :dim], next_id=next_cluster_id)
            Viewer.layers[2 + layer_offset].events.data.connect((_callback, 'callback'))
        Viewer.show(block=False)
        napari.run()

        new_spots = np.array(Viewer.layers[1 + layer_offset].data, dtype=int)

        if clusters is not None:
            if len(clusters) > 0:
                new_clusters = np.concatenate([
                    np.array(Viewer.layers[2 + layer_offset].data, dtype=int),
                    np.array(Viewer.layers[2 + layer_offset].features, dtype=int)
                ],
                    axis=1)

                new_clusters = _update_clusters(new_clusters, new_spots, voxel_size=voxel_size, cluster_size=cluster_size, min_spot_number=min_spot_number, shape=image.shape)
            else:
                # Fixed : new_clusters was left unbound when clusters was empty (NameError).
                new_clusters = clusters
        else:
            new_clusters = None

    except Exception as error:
        # Fall back to the inputs so the caller's data is never lost, then re-raise.
        new_spots = spots
        new_clusters = clusters
        raise error

    return new_spots, new_clusters
138
+
139
+
@@ -0,0 +1,272 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ import PySimpleGUI as sg
4
+ from ..gui import _error_popup, _warning_popup, parameters_layout, add_header, prompt, prompt_with_help
5
+
6
class ParameterInputError(Exception):
    """
    Raised when the user inputs an incorrect parameter value.
    """
    pass
11
+
12
class MappingError(ValueError):
    """
    Raised when user inputs an incorrect image mapping.

    Carries the offending (partial) map so the caller can pre-fill the manual prompt.
    """

    def __init__(self, map, *args: object) -> None:
        super().__init__(*args)
        self.map = map

    def get_map(self):
        """Return the dimension map that failed validation."""
        return self.map
22
+
23
def prepare_image_detection(map, user_parameters):
    """
    Return a monochannel image ready for spot detection.

    For a multichannel image, the channel to compute is extracted and the other
    channels are returned as extra layers for the Napari viewer; a monochannel
    image is returned unchanged with no extra layers.
    """
    image = reorder_image_stack(map, user_parameters)
    assert len(image.shape) != 5, "Time stack not supported, should never be True"

    if not user_parameters['multichannel']:
        return image, []

    channel_to_compute = user_parameters['channel to compute']
    remaining = np.delete(image.copy(), channel_to_compute, axis=0)
    other_image = list(remaining)
    image: np.ndarray = image[channel_to_compute]
    return image, other_image
43
+
44
def reorder_image_stack(map, user_parameters):
    """
    Move the image axes so they are ordered (t, c, z, y, x), keeping only the
    dimensions present in ``map`` (t, c and z are optional).
    """
    image_stack = user_parameters['image']

    source_axes = []
    for dimension in ('t', 'c', 'z'):
        if map.get(dimension) is not None:
            source_axes.append(int(map[dimension]))
    source_axes += [int(map['y']), int(map['x'])]

    return np.moveaxis(
        image_stack,
        source=tuple(source_axes),
        destination=tuple(range(len(source_axes)))
    )
61
+
62
def map_channels(user_parameters):
    """
    Determine the dimension mapping of the loaded image : automatic guess first,
    manual prompt as fallback when the guess is ambiguous; the automatic result
    is shown to the user for confirmation.
    """
    image = user_parameters['image']
    is_3D_stack = user_parameters['3D stack']
    is_time_stack = user_parameters['time stack']
    multichannel = user_parameters['multichannel']

    try:
        dim_map = _auto_map_channels(image, is_3D_stack, is_time_stack, multichannel)
    except MappingError as error:
        sg.popup("Automatic dimension mapping went wrong. Please indicate manually dimensions positions in the array.")
        dim_map = _ask_channel_map(image.shape, is_3D_stack, is_time_stack, multichannel, preset_map=error.get_map())
    else:
        dim_map = _show_mapping(image.shape, dim_map, is_3D_stack, is_time_stack, multichannel)

    return dim_map
79
+
80
+ def _auto_map_channels(image: np.ndarray, is_3D_stack, is_time_stack, multichannel) :
81
+ shape = image.shape
82
+ reducing_list = list(shape)
83
+
84
+ #Set the biggest dimension to y
85
+ y_val = max(reducing_list)
86
+ y_idx = shape.index(y_val)
87
+ map = {'y' : y_idx}
88
+
89
+ #2nd biggest set to x
90
+ reducing_list[y_idx] = -1
91
+ x_val = max(reducing_list)
92
+ x_idx = reducing_list.index(x_val)
93
+ reducing_list[y_idx] = y_val
94
+
95
+ map['x'] = x_idx
96
+ reducing_list.remove(y_val)
97
+ reducing_list.remove(x_val)
98
+
99
+ #smaller value set to c
100
+ if multichannel :
101
+ c_val = min(reducing_list)
102
+ c_idx = shape.index(c_val)
103
+ map['c'] = c_idx
104
+ reducing_list.remove(c_val)
105
+
106
+ if is_time_stack :
107
+ t_val = reducing_list[0]
108
+ t_idx = shape.index(t_val)
109
+ map['t'] = t_idx
110
+ reducing_list.remove(t_val)
111
+
112
+ if is_3D_stack :
113
+ z_val = reducing_list[0]
114
+ z_idx = shape.index(z_val)
115
+ map['z'] = z_idx
116
+
117
+ total_channels = len(map)
118
+ unique_channel = len(np.unique(list(map.values())))
119
+
120
+ if total_channels != unique_channel : raise MappingError(map,"{0} channel(s) are not uniquely mapped.".format(total_channels - unique_channel))
121
+
122
+ return map
123
+
124
def _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map: dict = None):
    """
    Prompt the user to manually assign each dimension (x, y, z, c, t) to an axis
    index, looping until the mapping is unique and within range for ``shape``.

    Parameters
    ----------
    shape : tuple
        Image shape, used to validate the entered axis indexes.
    is_3D_stack, is_time_stack, multichannel : bool
        Which optional dimensions to prompt for.
    preset_map : dict, optional
        Pre-filled mapping used as default prompt values. Previously defaulted
        to a mutable ``{}`` that was mutated via setdefault, leaking user input
        between successive calls; now defaults to None.

    Returns
    -------
    dict
    """
    map = {} if preset_map is None else preset_map
    while True:
        relaunch = False
        x = map.setdefault('x', 0)
        y = map.setdefault('y', 0)
        z = map.setdefault('z', 0)
        c = map.setdefault('c', 0)
        t = map.setdefault('t', 0)

        layout = [
            add_header("Dimensions mapping", [sg.Text("Image shape : {0}".format(shape))])
        ]
        layout += [parameters_layout(['x', 'y'], default_values=[x, y])]
        if is_3D_stack: layout += [parameters_layout(['z'], default_values=[z])]
        if multichannel: layout += [parameters_layout(['c'], default_values=[c])]
        if is_time_stack: layout += [parameters_layout(['t'], default_values=[t])]

        event, map = prompt_with_help(layout, help='mapping')
        if event == 'Cancel': quit()

        # Check integrity : the mapping must be unique and inside the image dimensions.
        channels_values = np.array(list(map.values()), dtype=int)
        total_channels = len(map)
        unique_channel = len(np.unique(channels_values))
        if total_channels != unique_channel:
            sg.popup("{0} channel(s) are not uniquely mapped.".format(total_channels - unique_channel))
            relaunch = True
        if not all(channels_values < len(shape)):
            sg.popup("Channels values out of range for image dimensions.\nPlease select dimensions from {0}".format(list(range(len(shape)))))
            relaunch = True
        if not relaunch: break

    return map
158
+
159
def _show_mapping(shape, map, is_3D_stack, is_time_stack, multichannel):
    """
    Display the current dimension mapping and let the user accept it ('Ok') or
    redefine it manually ('Change mapping' / 'Cancel').
    """
    layout = [
        [sg.Text("Image shape : {0}".format(shape))],
        [sg.Text('Dimensions mapping was set to :')],
        [sg.Text('x : {0} \ny : {1} \nz : {2} \nc : {3} \nt : {4}'.format(
            map['x'], map['y'], map.get('z'), map.get("c"), map.get('t')
        ))],
        [sg.Button('Change mapping')]
    ]

    event, values = prompt_with_help(layout, help='mapping')

    if event == 'Ok':
        return map
    if event in ('Change mapping', 'Cancel'):
        return _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map=map)
    raise AssertionError('Unforseen event')
178
+
179
def convert_parameters_types(values: dict):
    """
    Convert parameters from `ask_input_parameters` from strings to float, int or tuple type.

    Tuple parameters are rebuilt from their per-dimension fields
    (e.g. 'voxel_size_z', 'voxel_size_y', 'voxel_size_x'); any parameter that is
    missing or fails conversion is set to None.
    """

    # Tuples
    tuples_list = ['voxel_size', 'spot_size', 'log_kernel_size', 'minimum_distance', 'deconvolution_kernel']
    dim_tuple = ('z', 'y', 'x') if values['dim'] == 3 else ('y', 'x')

    for tuple_parameter in tuples_list:
        try:
            tuple_values = tuple(
                float(values.get('{0}_{1}'.format(tuple_parameter, dimension))) for dimension in dim_tuple
            )
        except Exception:  # str not convertible to float, or no parameter given
            values[tuple_parameter] = None
        else:
            values[tuple_parameter] = tuple_values

    # Scalars
    int_list = ['threshold', 'channel_to_compute', 'min number of spots', 'cluster size', 'nucleus channel signal']
    float_list = ['time_step', 'alpha', 'beta', 'gamma', 'threshold penalty']

    casts = [(name, int) for name in int_list] + [(name, float) for name in float_list]
    for parameter, cast in casts:
        try:
            values[parameter] = cast(values[parameter])
        except Exception:
            values[parameter] = None

    return values
216
+
217
def check_integrity(values: dict, do_dense_region_deconvolution, multichannel, segmentation_done, map, shape):
    """
    Checks that parameters given in input by user are fit to be used for bigfish detection.

    Parameters
    ----------
    values : dict
        Parameters already passed through `convert_parameters_types`.
    do_dense_region_deconvolution : bool
    multichannel : bool
    segmentation_done : bool
    map : dict
        Dimension map; needs 'c' when multichannel.
    shape : tuple
        Image shape, used to validate channel indexes.

    Returns
    -------
    dict
        ``values`` with the validated channel entries cast to int.

    Raises
    ------
    ParameterInputError
        When a mandatory parameter is missing, malformed or out of range.
    """

    # voxel_size
    if values['voxel_size'] is None:
        raise ParameterInputError('Incorrect voxel size parameter.')

    # detection integrity : spot_size OR (minimum_distance AND log_kernel_size)
    if not isinstance(values['spot_size'], (tuple, list)) and not (isinstance(values['minimum_distance'], (tuple, list)) and isinstance(values['log_kernel_size'], (tuple, list))):
        raise ParameterInputError("Either minimum_distance and 'log_kernel_size' must be correctly set\n OR 'spot_size' must be correctly set.")

    # Deconvolution integrity
    if do_dense_region_deconvolution:
        if not isinstance(values['alpha'], (float, int)) or not isinstance(values['beta'], (float, int)):
            raise ParameterInputError("Incorrect alpha or beta parameters.")
        if values['gamma'] is None and not isinstance(values['deconvolution_kernel'], (list, tuple)):
            _warning_popup('No gamma found; image will not be denoised before deconvolution.')
            values['gamma'] = 0

    # channel
    if multichannel:
        ch_len = shape[int(map['c'])]
        if segmentation_done:
            try:
                nuc_signal_ch = int(values['nucleus channel signal'])
            except Exception:
                raise ParameterInputError("Incorrect channel for nucleus signal measure.")
            # Fixed off-by-one : was `> ch_len`, letting the out-of-range
            # index == ch_len through (inconsistent with the check below).
            if nuc_signal_ch >= ch_len:
                raise ParameterInputError("Nucleus signal channel is out of range for image.\nPlease select from {0}".format(list(range(ch_len))))
            values['nucleus channel signal'] = nuc_signal_ch

        try:
            ch = int(values['channel to compute'])
        except Exception:
            raise ParameterInputError("Incorrect channel to compute parameter.")
        if ch >= ch_len:
            raise ParameterInputError("Channel to compute is out of range for image.\nPlease select from {0}".format(list(range(ch_len))))
        values['channel to compute'] = ch

    return values
257
+
258
+
259
def reorder_shape(shape, map):
    """
    Return ``shape`` reordered as (t, c, z, y, x), keeping only the dimensions
    present in ``map`` (t, c and z are optional).
    """
    axes = []
    for dimension in ('t', 'c', 'z'):
        if map.get(dimension) is not None:
            axes.append(int(map[dimension]))
    axes += [int(map['y']), int(map['x'])]

    return tuple(np.array(shape)[axes])