small-fish-gui 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- small_fish_gui/__init__.py +1 -1
- small_fish_gui/__main__.py +3 -3
- small_fish_gui/gui/_napari_widgets.py +93 -0
- small_fish_gui/{pipeline/_napari_wrapper.py → gui/napari.py} +43 -21
- small_fish_gui/gui/prompts.py +12 -8
- small_fish_gui/interface/output.py +18 -14
- small_fish_gui/pipeline/_colocalisation.py +181 -18
- small_fish_gui/pipeline/_segmentation.py +2 -2
- small_fish_gui/pipeline/actions.py +77 -14
- small_fish_gui/pipeline/detection.py +15 -4
- small_fish_gui/pipeline/main.py +41 -14
- {small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/METADATA +1 -1
- {small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/RECORD +15 -14
- {small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/WHEEL +0 -0
- {small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/licenses/LICENSE +0 -0
small_fish_gui/__init__.py
CHANGED
small_fish_gui/__main__.py
CHANGED
@@ -1,6 +1,8 @@
 import sys, subprocess
 import PySimpleGUI as sg

+from small_fish_gui import __version__
+
 def main():
     import small_fish_gui.pipeline.main

@@ -10,9 +12,7 @@ def is_last_version() :
     latest_version = latest_version[:latest_version.find(')')]
     latest_version = latest_version.replace(' ','').split(',')[-1]

-    current_version =
-    current_version = current_version[current_version.find('Version:')+8:]
-    current_version = current_version[:current_version.find('\\n')].replace(' ','')
+    current_version = __version__

     return current_version == latest_version

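The change above drops the string-scraping of pip output in favour of the package's own `__version__`. A minimal sketch of the resulting comparison (hypothetical helper name, assuming the package is installed):

```python
# Hypothetical sketch: compare the installed version against a "latest" string,
# as the rewritten is_last_version() now does via the imported __version__.
from small_fish_gui import __version__

def is_latest(latest_version: str) -> bool:
    return __version__ == latest_version   # plain string equality, no pip parsing

print(is_latest("1.7.0"))
```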
small_fish_gui/gui/_napari_widgets.py
ADDED
@@ -0,0 +1,93 @@
+"""
+Submodule containing custom class for napari widgets
+"""
+import numpy as np
+from napari.layers import Labels
+from magicgui import magicgui
+
+class cell_label_eraser :
+    """
+    Must be instanced within Napari Viewer definition range for update connection to work, cell deletion works fine anyway.
+    """
+    def __init__(self, label_list: 'list[Labels]'):
+        self.widget = self._create_eraser(label_list)
+        for label_layer in label_list :
+            label_layer.events.selected_label.connect((self, 'update'))
+
+    def update(self, event) :
+        layer : Labels = event.source
+        new_label = layer.selected_label
+        self.widget.label_number.value = new_label
+        self.widget.update()
+
+    def _create_eraser(self, label_list: 'list[Labels]') :
+        @magicgui(
+            call_button="Delete cell",
+            auto_call=False
+        )
+        def label_eraser(label_number: int) -> None :
+
+            for i, label in enumerate(label_list) :
+                label_list[i].data[label.data == label_number] = 0
+                label.refresh()
+
+        return label_eraser
+
+
+
+class free_label_picker :
+    def __init__(self, label_list):
+        self.widget = self._create_free_label_picker(label_list)
+
+    def _create_free_label_picker(self, label_list : 'list[Labels]') :
+        @magicgui(
+            call_button="Pick free label",
+            auto_call=False
+        )
+        def label_pick()->None :
+            max_list = [label_layer.data.max() for label_layer in label_list]
+            new_label = max(max_list) + 1
+            for label_layer in label_list :
+                label_layer.selected_label = new_label
+                label_layer.refresh()
+
+        return label_pick
+
+
+class segmentation_reseter :
+    def __init__(self, label_list):
+        self.save = self._get_save(label_list)
+        self.widget = self._create_widget(label_list)
+
+
+    def _get_save(self, label_list : 'list[Labels]') :
+        return [label.data.copy() for label in label_list]
+
+    def _create_widget(self, label_list: 'list[Labels]') :
+        @magicgui(
+            call_button= 'Reset segmentation',
+            auto_call=False,
+        )
+        def reset_segmentation() -> None:
+            for save_data, layer in zip(self.save, label_list) :
+                layer.data = save_data.copy()
+                layer.refresh()
+
+        return reset_segmentation
+
+class changes_propagater :
+    def __init__(self, label_list):
+        self.widget = self._create_widget(label_list)
+
+    def _create_widget(self, label_list: 'list[Labels]') :
+        @magicgui(
+            call_button='Apply changes',
+            auto_call=False,
+        )
+        def apply_changes() -> None:
+            for layer in label_list :
+                slices = layer.data.shape[0]
+                layer_2D = np.max(layer.data, axis=0)
+                layer.data = np.repeat(layer_2D[np.newaxis], slices, axis=0)
+                layer.refresh()
+        return apply_changes

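A hypothetical usage sketch of the new widget classes (the layer data and names are made up for the example; assumes napari and magicgui are installed):

```python
import numpy as np
import napari
from small_fish_gui.gui._napari_widgets import cell_label_eraser, free_label_picker

viewer = napari.Viewer()
# Toy 2D label image standing in for a real segmentation.
labels_layer = viewer.add_labels(np.zeros((64, 64), dtype=int), name="nucleus_label")

eraser = cell_label_eraser([labels_layer])   # "Delete cell" button
picker = free_label_picker([labels_layer])   # "Pick free label" button
viewer.window.add_dock_widget(eraser.widget, name="eraser", area="left")
viewer.window.add_dock_widget(picker.widget, name="picker", area="left")
napari.run()
```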
small_fish_gui/{pipeline/_napari_wrapper.py → gui/napari.py}
RENAMED
@@ -7,13 +7,21 @@ import napari.types
 import numpy as np
 import napari

+from napari.layers import Labels
+
+from magicgui import widgets
+from magicgui import magicgui
+
 from bigfish.stack import check_parameter
+from ._napari_widgets import cell_label_eraser, segmentation_reseter, changes_propagater, free_label_picker
 from ..utils import compute_anisotropy_coef
-from ._colocalisation import spots_multicolocalisation
+from ..pipeline._colocalisation import spots_multicolocalisation
+
+#Post detection

-def _update_clusters(new_clusters: np.ndarray, spots: np.ndarray, voxel_size, cluster_size,
+def _update_clusters(new_clusters: np.ndarray, spots: np.ndarray, voxel_size, cluster_size, shape) :
     if len(new_clusters) == 0 : return new_clusters
-    if len(spots) == 0 : return
+    if len(spots) == 0 : return np.empty(shape=(0,2+len(voxel_size)))

     if len(new_clusters[0]) in [2,3] :
         new_clusters = np.concatenate([
@@ -25,13 +33,10 @@ def _update_clusters(new_clusters: np.ndarray, spots: np.ndarray, voxel_size, cl
     assert len(new_clusters[0]) == 4 or len(new_clusters[0]) == 5, "Wrong number of coordinates for clusters should not happen."

     # Update spots clusters
-
-        new_clusters[:,-2] = spots_multicolocalisation(new_clusters[:,:3], spots, radius_nm= cluster_size, voxel_size=voxel_size, image_shape=shape)
-    elif len(voxel_size) == 2 :
-        new_clusters[:,-2] = spots_multicolocalisation(new_clusters[:,:2], spots, radius_nm= cluster_size, voxel_size=voxel_size, image_shape=shape)
+    new_clusters[:,-2] = spots_multicolocalisation(new_clusters[:,:-2], spots, radius_nm= cluster_size, voxel_size=voxel_size, image_shape=shape)

     # delete too small clusters
-
+    new_clusters = np.delete(new_clusters, new_clusters[:,-2] == 0, 0)

     return new_clusters

@@ -98,13 +103,16 @@ def correct_spots(image, spots, voxel_size= (1,1,1), clusters= None, cluster_siz
     new_spots = np.array(Viewer.layers['single spots'].data, dtype= int)

     if type(clusters) != type(None) :
-
-
-        new_clusters = _update_clusters(new_clusters, new_spots, voxel_size=voxel_size, cluster_size=cluster_size, min_spot_number=min_spot_number, shape=image.shape)
+        new_clusters = np.array(Viewer.layers['foci'].data, dtype= int)
+        new_clusters = _update_clusters(new_clusters, new_spots, voxel_size=voxel_size, cluster_size=cluster_size, shape=image.shape)
     else : new_clusters = None

     return new_spots, new_clusters

+# Segmentation
+
+
+
 def show_segmentation(
     nuc_image : np.ndarray,
     nuc_label : np.ndarray,
@@ -135,26 +143,40 @@ def show_segmentation(
     )

     #Init Napari viewer
-    Viewer = napari.Viewer(ndisplay=2, title= 'Show segmentation', axis_labels=['z','y','x'] if dim == 3 else ['y', 'x']
+    Viewer = napari.Viewer(ndisplay=2, title= 'Show segmentation', axis_labels=['z','y','x'] if dim == 3 else ['y', 'x'])

-    # Adding
+    # Adding nuclei
     nuc_signal_layer = Viewer.add_image(nuc_image, name= "nucleus signal", blending= 'additive', colormap='blue', contrast_limits=[nuc_image.min(), nuc_image.max()])
-    nuc_label_layer = Viewer.add_labels(nuc_label, opacity= 0.
+    nuc_label_layer = Viewer.add_labels(nuc_label, opacity= 0.6, name= 'nucleus_label',)
     nuc_label_layer.preserve_labels = True
+    labels_layer_list = [nuc_label_layer]

-    #Adding
-    if type(cyto_image) != type(None) : Viewer.add_image(cyto_image, name= "cytoplasm signal", blending= 'additive', colormap='red', contrast_limits=[cyto_image.min(), cyto_image.max()])
+    #Adding cytoplasm
     if (type(cyto_label) != type(None) and not np.array_equal(cyto_label, nuc_label) ) or (type(cyto_label) != type(None) and cyto_label.max() == 0):
-
+        Viewer.add_image(cyto_image, name= "cytoplasm signal", blending= 'additive', colormap='red', contrast_limits=[cyto_image.min(), cyto_image.max()])
+        cyto_label_layer = Viewer.add_labels(cyto_label, opacity= 0.6, name= 'cytoplasm_label')
         cyto_label_layer.preserve_labels = True
-
+        labels_layer_list += [cyto_label_layer]
+
+    #Adding widget
+    label_eraser = cell_label_eraser(labels_layer_list)
+    label_picker = free_label_picker(labels_layer_list)
+    label_reseter = segmentation_reseter(labels_layer_list)
+    changes_applier = changes_propagater(labels_layer_list)
+
+    buttons_container = widgets.Container(widgets=[label_picker.widget, changes_applier.widget, label_reseter.widget], labels=False, layout='horizontal')
+    tools_container = widgets.Container(
+        widgets = [buttons_container, label_eraser.widget],
+        labels=False,
+    )
+    Viewer.window.add_dock_widget(tools_container, name='SmallFish', area='left')
+
     #Launch Napari
-    Viewer.show(block=False)
     napari.run()

-
     new_nuc_label = Viewer.layers['nucleus_label'].data
-    if
+    if 'cytoplasm_label' in Viewer.layers :
+        new_cyto_label = Viewer.layers['cytoplasm_label'].data
     else : new_cyto_label = new_nuc_label

     return new_nuc_label, new_cyto_label
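A small self-contained illustration of the cluster clean-up added in `_update_clusters` above: after the per-cluster spot counts are recomputed, rows whose count column is 0 are dropped with a boolean mask.

```python
import numpy as np

# Columns: z, y, x, spots_in_cluster, cluster_id (toy values).
clusters = np.array([
    [10, 12, 14, 3, 0],
    [20, 22, 24, 0, 1],   # empty cluster -> removed
    [30, 32, 34, 5, 2],
])
clusters = np.delete(clusters, clusters[:, -2] == 0, 0)  # same call as in the diff
print(clusters)   # two rows remain
```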
small_fish_gui/gui/prompts.py
CHANGED
@@ -136,8 +136,8 @@ def output_image_prompt(filename) :
     excel_filename = values['filename'] + ".xlsx"
     feather_filename = values['filename'] + ".feather"

-    if not values['Excel'] and not values['Feather'] :
-        sg.popup("Please check at least one box : Excel/Feather")
+    if not values['Excel'] and not values['Feather'] and not values['csv'] :
+        sg.popup("Please check at least one box : Excel/Feather/csv")
         relaunch = True
     elif not os.path.isdir(values['folder']) :
         sg.popup("Incorrect folder")
@@ -280,7 +280,7 @@ def _warning_popup(warning:str) :

 def _sumup_df(results: pd.DataFrame) :

-    COLUMNS = ['acquisition_id','threshold', 'spot_number', 'cell_number', 'filename', 'channel to compute']
+    COLUMNS = ['acquisition_id','name','threshold', 'spot_number', 'cell_number', 'filename', 'channel to compute']

     if len(results) > 0 :
         if 'channel to compute' not in results : results['channel to compute'] = np.NaN
@@ -303,7 +303,7 @@ def hub_prompt(fov_results, do_segmentation=False) :
         [sg.Text('RESULTS', font= 'bold 13')],
         [sg.Table(values= list(sumup_df.values), headings= list(sumup_df.columns), row_height=20, num_rows= 5, vertical_scroll_only=False, key= "result_table"), segmentation_object],
         [sg.Button('Add detection'), sg.Button('Compute colocalisation'), sg.Button('Batch detection')],
-        [sg.Button('Save results', button_color= 'green'), sg.Button('Delete acquisitions',button_color= 'gray'), sg.Button('Reset segmentation',button_color= 'gray'), sg.Button('Reset results',button_color= 'gray')]
+        [sg.Button('Rename acquisition', button_color= 'green'), sg.Button('Save results', button_color= 'green'), sg.Button('Delete acquisitions',button_color= 'gray'), sg.Button('Reset segmentation',button_color= 'gray'), sg.Button('Reset results',button_color= 'gray')]
         # [sg.Button('Save results', button_color= 'green'), sg.Button('Reset results',button_color= 'gray')]
     ]

@@ -318,16 +318,20 @@ def hub_prompt(fov_results, do_segmentation=False) :
     return event, values

 def coloc_prompt() :
-    layout = [
-        [parameters_layout(['colocalisation distance'], header= 'Colocalisation', default_values= 0)]
-    ]
-
+    layout = parameters_layout(['colocalisation distance'], unit= 'nm', header= 'Colocalisation', default_values= 0)
     event, values = prompt_with_help(layout)

     if event == 'Ok' :
         return values['colocalisation distance']
     else : return False

+def rename_prompt() :
+    layout = parameters_layout(['name'], header= "Rename acquisitions", size=12)
+    event, values = prompt_with_help(layout)
+    if event == 'Ok' :
+        return values['name']
+    else : return False
+
 def ask_detection_confirmation(used_threshold) :
     layout = [
         [sg.Text("Proceed with current detection ?", font= 'bold 10')],
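For readers unfamiliar with the helpers used above, a rough equivalent of the new `rename_prompt` in plain PySimpleGUI (the real code goes through the package's `parameters_layout` and `prompt_with_help` wrappers, so this is only a sketch):

```python
import PySimpleGUI as sg

layout = [[sg.Text("name"), sg.Input(key="name", size=(12, 1))],
          [sg.Button("Ok"), sg.Button("Cancel")]]
window = sg.Window("Rename acquisitions", layout)
event, values = window.read()
window.close()

new_name = values["name"] if event == "Ok" else False   # mirrors rename_prompt's return
print(new_name)
```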
small_fish_gui/interface/output.py
CHANGED
@@ -10,11 +10,9 @@ def _cast_spot_to_tuple(spot) :
 def _cast_spots_to_tuple(spots) :
     return tuple(list(map(_cast_spot_to_tuple, spots)))

-def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= True, do_feather= False, do_csv=False, overwrite=False) :
+def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= True, do_feather= False, do_csv=False, overwrite=False, reset_index=True) :
     check_parameter(dataframe= pd.DataFrame, path= str, filename = str, do_excel = bool, do_feather = bool)

-    dataframe.columns = dataframe.columns.astype(str) # assert columns header are string for feather
-
     if len(dataframe) == 0 : return True
     if not do_excel and not do_feather and not do_csv :
         return False
@@ -22,6 +20,14 @@ def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= Tru
     if not path.endswith('/') : path +='/'
     assert os.path.isdir(path)

+    #Casting cols name to str for feather format
+    index_dim = dataframe.columns.nlevels
+    if index_dim == 1 :
+        dataframe.columns = dataframe.columns.astype(str)
+    else :
+        casted_cols = [dataframe.columns.get_level_values(level).astype(str) for level in range(index_dim)]
+        casted_cols = zip(*casted_cols)
+        dataframe.columns = pd.MultiIndex.from_tuples(casted_cols)

     new_filename = filename
     i= 1
@@ -31,23 +37,21 @@ def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= Tru
         new_filename = filename + '_{0}'.format(i)
         i+=1

-
-
+    COLUMNS_TO_DROP = ['image', 'spots', 'clusters', 'rna_coords', 'cluster_coords']
+    for col in COLUMNS_TO_DROP :
+        dataframe = dataframe.drop(columns=col)

-    if
-        dataframe = dataframe.drop(['spots'], axis= 1)
-
-    if 'clusters' in dataframe.columns :
-        dataframe = dataframe.drop(['clusters'], axis= 1)
+    if reset_index : dataframe = dataframe.reset_index(drop=True)

-    if
-    if do_csv : dataframe.reset_index(drop=True).to_csv(path + new_filename + '.csv', sep=";")
+    if do_csv : dataframe.to_csv(path + new_filename + '.csv', sep=";")
     if do_excel :
         if len(dataframe) < MAX_LEN_EXCEL :
-            dataframe.
+            dataframe.to_excel(path + new_filename + '.xlsx')
         else :
             print("Error : Table too big to be saved in excel format.")
             return False

+    if do_feather :
+        dataframe.to_parquet(path + new_filename + '.parquet')

-    return True
+    return True
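A self-contained illustration of the new column-name casting in `write_results`: parquet/feather writers require string column labels, including every level of a MultiIndex, which is what the per-level `astype(str)` above guarantees.

```python
import pandas as pd

df = pd.DataFrame({("count", 1): [3], ("count", 2): [5]})   # integer second level
levels = df.columns.nlevels
if levels == 1:
    df.columns = df.columns.astype(str)
else:
    casted = [df.columns.get_level_values(i).astype(str) for i in range(levels)]
    df.columns = pd.MultiIndex.from_tuples(zip(*casted))
print(df.columns.tolist())   # [('count', '1'), ('count', '2')] -- all labels are str
```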
small_fish_gui/pipeline/_colocalisation.py
CHANGED
@@ -79,7 +79,7 @@ def spots_multicolocalisation(spots_list, anchor_list, radius_nm, image_shape, v
     Example in 2D
     --------

-    >>> Anchors
+    >>> Anchors spots Radius (2px) Count
    >>> 0 0 0 0 0 0 0 X 0 0 X 0 1 0 1 0 0 0 0
    >>> 0 X 0 0 0 0 X 0 0 X 0 0 1 1 1 1 0 0 0 0 0
    >>> 0 X 0 0 0 0 X X 0 0 0 0 1 1 2 0 0 0 0 --> 5
@@ -116,7 +116,7 @@ def spots_multicolocalisation(spots_list, anchor_list, radius_nm, image_shape, v

     return res

-def spots_colocalisation(
+def spots_colocalisation(spot_list1:list, spot_list2:list, distance: float, voxel_size)-> int :
     """
     Return number of spots from spot_list1 located closer(large) than distance to at least one spot of spot_list2.

@@ -134,10 +134,9 @@ def spots_colocalisation(image_shape, spot_list1:list, spot_list2:list, distance
     if len(spot_list1[0]) != len(spot_list2[0]) :
         raise MissMatchError("dimensionalities of spots 1 and spots 2 don't match.")

-
-
-
-    image_shape = image_shape[-2:]
+    shape1 = np.max(spot_list1,axis=0)
+    shape2 = np.max(spot_list2,axis=0)
+    image_shape = np.max([shape1, shape2],axis=0) + 1

     signal2 = reconstruct_boolean_signal(image_shape, spot_list2)
     mask = np.logical_not(signal2)
@@ -173,8 +172,7 @@ def initiate_colocalisation(result_tables) :
             break
     return colocalisation_distance

-
-def launch_colocalisation(result_tables, result_dataframe, colocalisation_distance) :
+def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisation_distance) :
     """

     Target :
@@ -190,8 +188,10 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan

     """

-    acquisition1 = result_dataframe.
-    acquisition2 = result_dataframe.
+    acquisition1 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id1]
+    acquisition2 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id2]
+
+    acquisition_couple = (acquisition_id1,acquisition_id2)

     voxel_size1 = acquisition1.at['voxel_size']
     voxel_size2 = acquisition2.at['voxel_size']
@@ -208,7 +208,6 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan
     else :
         shape = shape1

-    acquisition_couple = (acquisition1.at['acquisition_id'], acquisition2.at['acquisition_id'])

     spots1 = acquisition1['spots']
     spots2 = acquisition2['spots']
@@ -217,8 +216,8 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan
     spot2_total = len(spots2)

     try :
-        fraction_spots1_coloc_spots2 = spots_colocalisation(
-        fraction_spots2_coloc_spots1 = spots_colocalisation(
+        fraction_spots1_coloc_spots2 = spots_colocalisation(spot_list1=spots1, spot_list2=spots2, distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
+        fraction_spots2_coloc_spots1 = spots_colocalisation(spot_list1=spots2, spot_list2=spots1, distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
     except MissMatchError as e :
         sg.popup(str(e))
         fraction_spots1_coloc_spots2 = np.NaN
@@ -227,11 +226,11 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan
     if 'clusters' in acquisition1.index :
         try :
             clusters1 = acquisition1['clusters'][:,:len(voxel_size)]
-            fraction_spots2_coloc_cluster1 = spots_colocalisation(
+            fraction_spots2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2, spot_list2=clusters1, distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
         except MissMatchError as e :
             sg.popup(str(e))
             fraction_spots2_coloc_cluster1 = np.NaN
-        except TypeError : #
+        except TypeError : # clusters not computed
             fraction_spots2_coloc_cluster1 = np.NaN


@@ -240,8 +239,8 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan
     if 'clusters' in acquisition2.index :
         try :
             clusters2 = acquisition2['clusters'][:,:len(voxel_size)]
-            fraction_spots1_coloc_cluster2 = spots_colocalisation(
-        except MissMatchError as e :#
+            fraction_spots1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1, spot_list2=clusters2, distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
+        except MissMatchError as e :# clusters not computed
             sg.popup(str(e))
             fraction_spots1_coloc_cluster2 = np.NaN
         except TypeError :
@@ -263,4 +262,168 @@ def launch_colocalisation(result_tables, result_dataframe, colocalisation_distan
         'fraction_spots1_coloc_cluster2' : [fraction_spots1_coloc_cluster2],
         })

-
+    coloc_df['fraction_spots1_coloc_free2'] = coloc_df['fraction_spots1_coloc_spots2'] - coloc_df['fraction_spots1_coloc_cluster2']
+    coloc_df['fraction_spots2_coloc_free1'] = coloc_df['fraction_spots2_coloc_spots1'] - coloc_df['fraction_spots2_coloc_cluster1']
+
+    #Add names
+    coloc_df_col = list(coloc_df.columns)
+    coloc_df['name1'] = acquisition1.at['name']
+    coloc_df['name2'] = acquisition2.at['name']
+    coloc_df = coloc_df.loc[:,['name1','name2'] + coloc_df_col]
+
+    return coloc_df
+
+def _cell_coloc(
+        acquisition_id1: int,
+        acquisition_id2: int,
+        result_dataframe : pd.DataFrame,
+        cell_dataframe : pd.DataFrame,
+        colocalisation_distance : float,
+        ) :
+
+    acquisition1 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id1]
+    acquisition2 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id2]
+
+    acquisition_name_id1 = acquisition1['name'].iat[0]
+    acquisition_name_id2 = acquisition2['name'].iat[0]
+    cluster_radius1 = acquisition1['cluster size'].iat[0]
+    cluster_radius2 = acquisition2['cluster size'].iat[0]
+    result_dataframe = result_dataframe.set_index('acquisition_id', drop=False)
+    coloc_name = '{0}nm_{1}{2}_{3}{4}'.format(colocalisation_distance, acquisition_id1,acquisition_name_id1, acquisition_id2,acquisition_name_id2)
+
+    #Getting shape
+    if not result_dataframe.at[acquisition_id1, 'reordered_shape'] == result_dataframe.at[acquisition_id2, 'reordered_shape'] :
+        raise ValueError("Selected acquisitions have different shapes. Most likely they don't belong to the same fov.")
+
+    #Getting voxel_size
+    if not result_dataframe.at[acquisition_id1, 'voxel_size'] == result_dataframe.at[acquisition_id2, 'voxel_size'] :
+        raise ValueError("Selected acquisitions have different voxel_size. Most likely they don't belong to the same fov.")
+    voxel_size = result_dataframe.at[acquisition_id1, 'voxel_size']
+
+    #Selecting relevant cells in Cell table
+    cell_dataframe = cell_dataframe.loc[(cell_dataframe['acquisition_id'] == acquisition_id1)|(cell_dataframe['acquisition_id'] == acquisition_id2)]
+
+    #Putting spots lists in 2 cols for corresponding cells
+    pivot_values_columns = ['rna_coords', 'total_rna_number']
+    if 'clusters' in acquisition2.columns or 'clusters' in acquisition1.columns :
+        pivot_values_columns.extend(['cluster_coords','foci_number'])
+    colocalisation_df = cell_dataframe.pivot(
+        columns=['name', 'acquisition_id'],
+        values= pivot_values_columns,
+        index= 'cell_id'
+    )
+    #spots _vs spots
+    colocalisation_df[("spots_to_spots_count",coloc_name,"forward")] = colocalisation_df['rna_coords'].apply(
+        lambda x: spots_colocalisation(
+            spot_list1= x[(acquisition_name_id1,acquisition_id1)],
+            spot_list2= x[(acquisition_name_id2,acquisition_id2)],
+            distance=colocalisation_distance,
+            voxel_size=voxel_size
+        ),axis=1
+    )
+    colocalisation_df[("spots_to_spots_fraction",coloc_name,"forward")] = colocalisation_df[("spots_to_spots_count",coloc_name,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)
+
+    colocalisation_df[("spots_to_spots_count",coloc_name,"backward")] = colocalisation_df['rna_coords'].apply(
+        lambda x: spots_colocalisation(
+            spot_list1= x[(acquisition_name_id2,acquisition_id2)],
+            spot_list2= x[(acquisition_name_id1,acquisition_id1)],
+            distance=colocalisation_distance,
+            voxel_size=voxel_size
+        ),axis=1
+    )
+    colocalisation_df[("spots_to_spots_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_spots_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)
+
+    if acquisition2['Cluster computation'].iat[0] :
+        if len(acquisition2['clusters'].iat[0]) > 0 :
+
+            #spots to clusters
+            colocalisation_df[("spots_to_clusters_count",coloc_name,"forward")] = colocalisation_df.apply(
+                lambda x: spots_colocalisation(
+                    spot_list1= x[('rna_coords',acquisition_name_id1,acquisition_id1)],
+                    spot_list2= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+                    distance=colocalisation_distance + cluster_radius2,
+                    voxel_size=voxel_size
+                ),axis=1
+            )
+            colocalisation_df[("spots_to_clusters_fraction",coloc_name,"forward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)
+
+    if acquisition1['Cluster computation'].iat[0] :
+        if len(acquisition1['clusters'].iat[0]) > 0 :
+            colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")] = colocalisation_df.apply(
+                lambda x: spots_colocalisation(
+                    spot_list1= x[('rna_coords',acquisition_name_id2,acquisition_id2)],
+                    spot_list2= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+                    distance=colocalisation_distance + cluster_radius1,
+                    voxel_size=voxel_size
+                ),axis=1
+            )
+            colocalisation_df[("spots_to_clusters_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)
+
+    if acquisition2['Cluster computation'].iat[0] and acquisition1['Cluster computation'].iat[0] :
+        if len(acquisition1['clusters'].iat[0]) > 0 and len(acquisition2['clusters'].iat[0]) > 0 :
+            #clusters to clusters
+            colocalisation_df[("clusters_to_clusters_count",coloc_name,"forward")] = colocalisation_df.apply(
+                lambda x: spots_colocalisation(
+                    spot_list1= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+                    spot_list2= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+                    distance=colocalisation_distance + cluster_radius1 + cluster_radius2,
+                    voxel_size=voxel_size
+                ),axis=1
+            )
+            colocalisation_df[("clusters_to_clusters_fraction",coloc_name,"forward")] = colocalisation_df[("clusters_to_clusters_count",coloc_name,"forward")].astype(float) / colocalisation_df[('foci_number',acquisition_name_id1,acquisition_id1)].astype(float)
+
+            colocalisation_df[("clusters_to_clusters_count",coloc_name,"backward")] = colocalisation_df.apply(
+                lambda x: spots_colocalisation(
+                    spot_list1= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+                    spot_list2= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+                    distance=colocalisation_distance + cluster_radius1 + cluster_radius2,
+                    voxel_size=voxel_size
+                ),axis=1
+            )
+            colocalisation_df[("clusters_to_clusters_fraction",coloc_name,"backward")] = colocalisation_df[("clusters_to_clusters_count",coloc_name,"backward")].astype(float) / colocalisation_df[('foci_number',acquisition_name_id2,acquisition_id2)].astype(float)
+
+    colocalisation_df = colocalisation_df.sort_index(axis=0).sort_index(axis=1, level=0)
+
+    if 'cluster_coords' in cell_dataframe.columns : colocalisation_df = colocalisation_df.drop('cluster_coords', axis=1)
+    colocalisation_df = colocalisation_df.drop('rna_coords', axis=1)
+
+    return colocalisation_df
+
+@add_default_loading
+def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe, colocalisation_distance, global_coloc_df, cell_coloc_df: pd.DataFrame) :
+
+    acquisition1 = result_dataframe.iloc[result_tables[0]]
+    acquisition2 = result_dataframe.iloc[result_tables[1]]
+
+    acquisition_id1, acquisition_id2 = (acquisition1.at['acquisition_id'], acquisition2.at['acquisition_id'])
+
+    if acquisition_id1 in cell_result_dataframe['acquisition_id'] and acquisition_id2 in cell_result_dataframe['acquisition_id'] :
+        print("Launching cell to cell colocalisation.")
+        new_coloc = _cell_coloc(
+            acquisition_id1 = acquisition_id1,
+            acquisition_id2 = acquisition_id2,
+            result_dataframe = result_dataframe,
+            cell_dataframe=cell_result_dataframe,
+            colocalisation_distance=colocalisation_distance
+        )
+        cell_coloc_df = pd.concat([
+            cell_coloc_df,
+            new_coloc,
+        ], axis=1).sort_index(axis=1, level=0)
+
+
+    else :
+        print("Launching global colocalisation.")
+        new_coloc = _global_coloc(
+            acquisition_id1=acquisition_id1,
+            acquisition_id2=acquisition_id2,
+            result_dataframe=result_dataframe,
+            colocalisation_distance=colocalisation_distance,
+        )
+        global_coloc_df = pd.concat([
+            global_coloc_df,
+            new_coloc,
+        ], axis=0).reset_index(drop=True)
+
+
+    return global_coloc_df, cell_coloc_df
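A minimal, made-up illustration of the `DataFrame.pivot` call at the heart of the new `_cell_coloc`: one row per cell, one (name, acquisition_id) column pair per channel, onto which the `spots_to_spots_*` columns are then appended (list-valued `columns`/`values` needs a reasonably recent pandas):

```python
import pandas as pd

cell_df = pd.DataFrame({
    "cell_id":          [0, 1, 0, 1],
    "name":             ["rna_A", "rna_A", "rna_B", "rna_B"],
    "acquisition_id":   [0, 0, 1, 1],
    "total_rna_number": [12, 7, 30, 22],
})
wide = cell_df.pivot(columns=["name", "acquisition_id"],
                     values=["total_rna_number"],
                     index="cell_id")
print(wide.columns.nlevels)   # 3 levels: (value, name, acquisition_id)
print(wide)
```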
small_fish_gui/pipeline/_segmentation.py
CHANGED
@@ -7,7 +7,7 @@ from skimage.measure import label
 from ..gui.layout import _segmentation_layout
 from ..gui import prompt, prompt_with_help, ask_cancel_segmentation
 from ..interface import open_image
-from .
+from ..gui.napari import show_segmentation as napari_show_segmentation
 from .utils import from_label_get_centeroidscoords
 from matplotlib.colors import ListedColormap

@@ -149,7 +149,7 @@ def launch_segmentation(image: np.ndarray, user_parameters: dict) :
             relaunch=True
         values['other_nucleus_image'] = user_parameters.setdefault('other_nucleus_image', None)

-        elif nucleus_image.shape != image[cytoplasm_channel] :
+        elif nucleus_image.shape != image[cytoplasm_channel].shape :
             sg.popup("Nucleus image shape missmatched. Expected same shape as cytoplasm_image \ncytoplasm shape : {0}, nucleus shape : {1}".format(image[cytoplasm_channel].shape, nucleus_image.shape))
             nucleus_image = None
             relaunch=True
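The one-line `_segmentation.py` fix above is easy to miss: the old code compared a shape tuple against the cytoplasm image itself rather than against its `.shape`, so the intended check never reduced to a simple boolean. Sketch of the corrected comparison:

```python
import numpy as np

nucleus_image = np.zeros((2, 4))
cytoplasm_image = np.zeros((2, 4))

if nucleus_image.shape != cytoplasm_image.shape:   # shape vs shape, as in 1.7.0
    print("shape mismatch")
else:
    print("shapes match")
```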
small_fish_gui/pipeline/actions.py
CHANGED
@@ -2,7 +2,7 @@
 This submodule groups all the possible actions of the user in the main windows. It is the start of each action the user can do.
 """

-from ..gui.prompts import output_image_prompt, ask_detection_confirmation, ask_cancel_detection
+from ..gui.prompts import output_image_prompt, ask_detection_confirmation, ask_cancel_detection, rename_prompt
 from ..interface.output import write_results
 from ._preprocess import map_channels, prepare_image_detection, reorder_shape, reorder_image_stack
 from .detection import ask_input_parameters, initiate_detection, launch_detection, launch_features_computation, get_nucleus_signal
@@ -118,7 +118,7 @@ def add_detection(user_parameters, segmentation_done, acquisition_id, cytoplasm_
         )
     return new_results_df, new_cell_results_df, acquisition_id, user_parameters, segmentation_done, cytoplasm_label, nucleus_label

-def save_results(result_df, cell_result_df,
+def save_results(result_df, cell_result_df, global_coloc_df, cell_coloc_df) :
     if len(result_df) != 0 :
         dic = output_image_prompt(filename=result_df.iloc[0].at['filename'])

@@ -128,34 +128,46 @@ def save_results(result_df, cell_result_df, coloc_df) :
         do_excel = dic['Excel']
         do_feather = dic['Feather']
         do_csv = dic['csv']
+
+        if 'rna_coords' in cell_result_df.columns : cell_result_df = cell_result_df.drop(columns='rna_coords')
+
         sucess1 = write_results(result_df, path= path, filename=filename, do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
         sucess2 = write_results(cell_result_df, path= path, filename=filename + '_cell_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
-        sucess3 = write_results(
-
+        sucess3 = write_results(global_coloc_df, path= path, filename=filename + 'global_coloc_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
+        sucess4 = write_results(cell_coloc_df, path= path, filename=filename + 'cell2cell_coloc_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv, reset_index=False)
+        if all([sucess1,sucess2, sucess3, sucess4,]) : sg.popup("Sucessfully saved at {0}.".format(path))

     else :
         dic = None
         sg.popup('No results to save.')

-def compute_colocalisation(result_tables, result_dataframe) :
+def compute_colocalisation(result_tables, result_dataframe, cell_result_dataframe, global_coloc_df, cell_coloc_df) :
     colocalisation_distance = initiate_colocalisation(result_tables)

     if colocalisation_distance == False :
-
+        pass
     else :
-
+        global_coloc_df, cell_coloc_df = launch_colocalisation(
+            result_tables,
+            result_dataframe=result_dataframe,
+            cell_result_dataframe=cell_result_dataframe,
+            colocalisation_distance=colocalisation_distance,
+            global_coloc_df=global_coloc_df,
+            cell_coloc_df=cell_coloc_df,
+        )

-    return
+    return global_coloc_df, cell_coloc_df

 def delete_acquisitions(selected_acquisitions : pd.DataFrame,
                         result_df : pd.DataFrame,
                         cell_result_df : pd.DataFrame,
-
+                        global_coloc_df : pd.DataFrame,
+                        cell_coloc_df : pd.DataFrame,
                         ) :

     if len(result_df) == 0 :
         sg.popup("No acquisition to delete.")
-        return result_df, cell_result_df,
+        return result_df, cell_result_df, global_coloc_df

     if len(selected_acquisitions) == 0 :
         sg.popup("Please select the acquisitions you would like to delete.")
@@ -169,11 +181,62 @@ def delete_acquisitions(selected_acquisitions : pd.DataFrame,
         print("{0} cells deleted.".format(len(cell_result_df_drop_idx)))
         cell_result_df = cell_result_df.drop(cell_result_df_drop_idx, axis=0)

-        if len(
-            coloc_df_drop_idx =
+        if len(global_coloc_df) > 0 :
+            coloc_df_drop_idx = global_coloc_df[(global_coloc_df["acquisition_id_1"].isin(acquisition_ids)) | (global_coloc_df['acquisition_id_2'].isin(acquisition_ids))].index
             print("{0} coloc measurement deleted.".format(len(coloc_df_drop_idx)))
-
+            global_coloc_df = global_coloc_df.drop(coloc_df_drop_idx, axis=0)
+
+        if len(cell_coloc_df) > 0 :
+            for acquisition_id in acquisition_ids :
+                cell_coloc_df = cell_coloc_df.drop(acquisition_id, axis=1, level=2) #Delete spot number and foci number
+                coloc_columns = cell_coloc_df.columns.get_level_values(1)
+                coloc_columns = coloc_columns[coloc_columns.str.contains(str(acquisition_id))]
+                cell_coloc_df = cell_coloc_df.drop(labels=coloc_columns, axis=1, level=1)

         result_df = result_df.drop(result_drop_idx, axis=0)

-    return result_df, cell_result_df,
+    return result_df, cell_result_df, global_coloc_df, cell_coloc_df
+
+def rename_acquisitions(
+        selected_acquisitions : pd.DataFrame,
+        result_df : pd.DataFrame,
+        cell_result_df : pd.DataFrame,
+        global_coloc_df : pd.DataFrame,
+        cell_coloc_df : pd.DataFrame,
+        ) :
+
+    if len(result_df) == 0 :
+        sg.popup("No acquisition to rename.")
+        return result_df, cell_result_df, global_coloc_df
+
+    if len(selected_acquisitions) == 0 :
+        sg.popup("Please select the acquisitions you would like to rename.")
+
+    else :
+        name = rename_prompt()
+        if not name : return result_df, cell_result_df, global_coloc_df #User didn't put a name or canceled
+        name : str = name.replace(' ','_')
+        acquisition_ids = list(result_df.iloc[list(selected_acquisitions)]['acquisition_id'])
+        old_names = list(result_df.loc[result_df['acquisition_id'].isin(acquisition_ids)]['name'])
+        old_names.sort(key=len) #We order this list by elmt length
+        old_names.reverse() #From longer to smaller
+
+        result_df.loc[result_df['acquisition_id'].isin(acquisition_ids),['name']] = name
+        if len(cell_result_df) > 0 : cell_result_df.loc[cell_result_df['acquisition_id'].isin(acquisition_ids),['name']] = name
+        if len(global_coloc_df) > 0 :
+            global_coloc_df.loc[global_coloc_df['acquisition_id_1'].isin(acquisition_ids), ['name1']] = name
+            global_coloc_df.loc[global_coloc_df['acquisition_id_2'].isin(acquisition_ids), ['name2']] = name
+        if len(cell_coloc_df) > 0 :
+            target_columns = cell_coloc_df.columns.get_level_values(1)
+            for old_name in old_names : #Note list was ordered by elmt len (decs) to avoid conflict when one name is contained by another one. if the shorter is processed first then the longer will not be able to be properly renamed.
+                target_columns = target_columns.str.replace(old_name, name)
+
+            new_columns = zip(
+                cell_coloc_df.columns.get_level_values(0),
+                target_columns,
+                cell_coloc_df.columns.get_level_values(2),
+            )
+
+            cell_coloc_df.columns = pd.MultiIndex.from_tuples(new_columns)
+
+    return result_df, cell_result_df, global_coloc_df, cell_coloc_df

small_fish_gui/pipeline/detection.py
CHANGED
@@ -1,10 +1,11 @@
 """
 Contains code to handle detection as well as bigfish wrappers related to spot detection.
 """
+
 from ._preprocess import ParameterInputError
 from ._preprocess import check_integrity, convert_parameters_types
 from ._signaltonoise import compute_snr_spots
-from .
+from ..gui.napari import correct_spots, _update_clusters, threshold_selection
 from ..gui import add_default_loading
 from ..gui import detection_parameters_promt, input_image_prompt
 from ..utils import compute_anisotropy_coef
@@ -486,6 +487,7 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa
     features_names += ['nucleus_mean_signal', 'nucleus_median_signal', 'nucleus_max_signal', 'nucleus_min_signal']
     features_names += ['snr_mean', 'snr_median', 'snr_std']
     features_names += ['cell_center_coord','foci_number','foci_in_nuc_number']
+    features_names += ['rna_coords','cluster_coords']

     result_frame = pd.DataFrame()

@@ -565,6 +567,7 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa
         features += [cell_center, foci_number, foci_in_nuc_number]

         features = [acquisition_id, cell_id, cell_bbox] + features
+        features += [rna_coords, foci_coords]

         result_frame = pd.concat([
             result_frame,
@@ -575,8 +578,6 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa

     return result_frame

-
-
 @add_default_loading
 def launch_clustering(spots, user_parameters):

@@ -633,7 +634,7 @@ def launch_detection(

     if do_clustering :
         clusters = launch_clustering(spots, user_parameters, hide_loading = hide_loading) #012 are coordinates #3 is number of spots per cluster, #4 is cluster index
-        clusters = _update_clusters(clusters, spots, voxel_size=user_parameters['voxel_size'], cluster_size=user_parameters['cluster size'],
+        clusters = _update_clusters(clusters, spots, voxel_size=user_parameters['voxel_size'], cluster_size=user_parameters['cluster size'], shape=image.shape)

     else : clusters = None

@@ -694,6 +695,16 @@ def launch_features_computation(acquisition_id, image, nucleus_signal, spots, cl
     frame_results['threshold'] = user_parameters['threshold']

     frame_results = pd.DataFrame(columns= frame_results.keys(), data= (frame_results.values(),))
+
+    #Adding name column
+    result_col = list(frame_results.columns)
+    cell_result_col = list(cell_result_dframe.columns)
+    name = "acquisition_{0}".format(acquisition_id)
+    frame_results['name'] = name
+    cell_result_dframe['name'] = name
+    frame_results = frame_results.loc[:,['name'] + result_col]
+    cell_result_dframe = cell_result_dframe.loc[:,['name'] + cell_result_col]
+    cell_result_dframe['total_rna_number'] = cell_result_dframe['nb_rna_in_nuc'] + cell_result_dframe['nb_rna_out_nuc']

     return frame_results, cell_result_dframe

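A self-contained illustration of the column renaming strategy used by the new `rename_acquisitions` in `actions.py` above: the second column level is rewritten with `str.replace` and the MultiIndex is rebuilt (toy column names follow the `{distance}nm_{id}{name}_{id}{name}` pattern from `_cell_coloc`):

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples([
    ("spots_to_spots_count", "300nm_0acquisition_0_1acquisition_1", "forward"),
    ("spots_to_spots_count", "300nm_0acquisition_0_1acquisition_1", "backward"),
])
df = pd.DataFrame([[3, 4]], columns=cols)

renamed = df.columns.get_level_values(1).str.replace("acquisition_0", "dapi")
df.columns = pd.MultiIndex.from_tuples(
    zip(df.columns.get_level_values(0), renamed, df.columns.get_level_values(2))
)
print(df.columns.get_level_values(1)[0])   # 300nm_0dapi_1acquisition_1
```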
small_fish_gui/pipeline/main.py
CHANGED
@@ -5,7 +5,7 @@ This script is called when software starts; it is the main loop.
 import pandas as pd
 import PySimpleGUI as sg
 from ..gui import hub_prompt
-from .actions import add_detection, save_results, compute_colocalisation, delete_acquisitions
+from .actions import add_detection, save_results, compute_colocalisation, delete_acquisitions, rename_acquisitions
 from ._preprocess import clean_unused_parameters_cache
 from ..batch import batch_promp

@@ -14,15 +14,31 @@ user_parameters = dict() # Very important instance containg all choice from user
 acquisition_id = -1
 result_df = pd.DataFrame()
 cell_result_df = pd.DataFrame()
-
+global_coloc_df = pd.DataFrame()
+cell_coloc_df = pd.DataFrame()
 segmentation_done = False
 cytoplasm_label = None
 nucleus_label = None

+#Use for dev purpose
+MAKE_NEW_SAVE = False
+PATH = "/home/floricslimani/Documents/small_fish_workshop/save"
+LOAD_SAVE = False
+
 while True : #Break this loop to close small_fish
-
-
-
+
+    if LOAD_SAVE :
+        result_df = pd.read_csv(PATH + "/result.csv", sep='|')
+        cell_result_df = pd.read_csv(PATH + "/cell_result_df.csv", sep='|')
+        global_coloc_df = pd.read_csv(PATH + "/global_coloc_df.csv", sep='|')
+        cell_coloc_df = pd.read_csv(PATH + "/cell_coloc_df.csv", sep='|')
+
+
+    else :
+        result_df = result_df.reset_index(drop=True)
+        cell_result_df = cell_result_df.reset_index(drop=True)
+        global_coloc_df = global_coloc_df.reset_index(drop=True)
+        cell_coloc_df = cell_coloc_df.reset_index(drop=True)
     try :
         event, values = hub_prompt(result_df, segmentation_done)

@@ -43,25 +59,26 @@ while True : #Break this loop to close small_fish
             save_results(
                 result_df=result_df,
                 cell_result_df=cell_result_df,
-
+                global_coloc_df=global_coloc_df,
+                cell_coloc_df = cell_coloc_df,
             )

         elif event == 'Compute colocalisation' :
             result_tables = values.setdefault('result_table', []) #Contains the lines selected by the user on the sum-up array.

-
+            global_coloc_df, cell_coloc_df = compute_colocalisation(
                 result_tables,
-                result_dataframe=result_df
+                result_dataframe=result_df,
+                cell_result_dataframe=cell_result_df,
+                global_coloc_df=global_coloc_df,
+                cell_coloc_df=cell_coloc_df,
             )

-            coloc_df = pd.concat(
-                [coloc_df,res_coloc],
-                axis= 0)
-
         elif event == "Reset results" :
             result_df = pd.DataFrame()
             cell_result_df = pd.DataFrame()
-
+            global_coloc_df = pd.DataFrame()
+            cell_coloc_df = pd.DataFrame()
             acquisition_id = -1
             segmentation_done = False
             cytoplasm_label = None
@@ -74,7 +91,7 @@ while True : #Break this loop to close small_fish

         elif event == "Delete acquisitions" :
             selected_acquisitions = values.setdefault('result_table', []) #Contains the lines selected by the user on the sum-up array.
-            result_df, cell_result_df,
+            result_df, cell_result_df, global_coloc_df, cell_coloc_df = delete_acquisitions(selected_acquisitions, result_df, cell_result_df, global_coloc_df, cell_coloc_df)

         elif event == "Batch detection" :
             result_df, cell_result_df, acquisition_id, user_parameters, segmentation_done, cytoplasm_label,nucleus_label = batch_promp(
@@ -84,8 +101,18 @@ while True : #Break this loop to close small_fish
                 preset=user_parameters,
             )

+        elif event == "Rename acquisition" :
+            selected_acquisitions = values.setdefault('result_table', []) #Contains the lines selected by the user on the sum-up array.
+            result_df, cell_result_df, global_coloc_df, cell_coloc_df = rename_acquisitions(selected_acquisitions, result_df, cell_result_df, global_coloc_df, cell_coloc_df)
+
         else :
             break
+
+        if MAKE_NEW_SAVE :
+            result_df.reset_index(drop=True).to_csv(PATH + "/result.csv", sep='|')
+            cell_result_df.reset_index(drop=True).to_csv(PATH + "/cell_result_df.csv", sep='|')
+            cell_coloc_df.reset_index(drop=True).to_csv(PATH + "/cell_coloc_df.csv", sep='|')
+            global_coloc_df.reset_index(drop=True).to_csv(PATH + "/global_coloc_df.csv", sep='|')

     except Exception as error :
         sg.popup(str(error))

{small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: small_fish_gui
-Version: 1.
+Version: 1.7.0
 Summary: Small Fish is a python application for the analysis of smFish images. It provides a ready to use graphical interface to combine famous python packages for cell analysis without any need for coding.
 Project-URL: Homepage, https://github.com/2Echoes/small_fish
 Project-URL: Issues, https://github.com/2Echoes/small_fish/issues

{small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/RECORD
CHANGED
@@ -2,8 +2,8 @@ small_fish_gui/.readthedocs.yaml,sha256=r2T0e_In8X8l0_ZwgPvuoWQ9c0PE9bSpFzV2W6Ez
 small_fish_gui/LICENSE,sha256=-iFy8VGBYs5VsHglKpk4D-hxqQ2jMJaqmfq_ulIzDks,1303
 small_fish_gui/README.md,sha256=4RpEXKZW5vH6sUWeZb88yr1TLLPi20PqOk7KdA9O9Hk,4234
 small_fish_gui/Segmentation example.jpg,sha256=opfiSbjmfF6z8kBs08sg_FNR2Om0AcMPU5sSwSLHdoQ,215038
-small_fish_gui/__init__.py,sha256=
-small_fish_gui/__main__.py,sha256=
+small_fish_gui/__init__.py,sha256=LrR2v63vron8S_9kBcwoaenFnNYVYCOplKKCph3b-ks,1941
+small_fish_gui/__main__.py,sha256=jjFNnf-l4jCJI16epq2KOaKmgtUAe9lSNdPj5fpxrDk,1143
 small_fish_gui/napari_detection_example.png,sha256=l5EZlrbXemLiGqb5inSVsD6Kko1Opz528-go-fBfrw8,977350
 small_fish_gui/requirements.txt,sha256=9OMfUAnLdHevq6w_fVoDmVmkSMJeFofkOK_86_fu9C0,321
 small_fish_gui/utils.py,sha256=LM6QW2ono_LIRv7JXIIq7ZxxbDXqBtZ5uR9gjKJfwM8,1903
@@ -21,32 +21,33 @@ small_fish_gui/batch/values.py,sha256=C1hRlCpTIDsg89DMKIIW5NUxeK876ODRUuJ2D-mJv6
 small_fish_gui/batch/values.txt,sha256=PVxzIaaF6DGFRx_CMaStXZI6OrbjNub1-jR3pklXVjc,991
 small_fish_gui/docs/conf.py,sha256=6YU8UEpTenKGMiz7H4aG42Of72_n4uLadDfHJvziqRk,16
 small_fish_gui/gui/__init__.py,sha256=xQ_BfYcnQmKZtx_0leO4OmbkLNLv49ZPqEu_UXMgmDc,867
+small_fish_gui/gui/_napari_widgets.py,sha256=8IMppaPZU37ANdZwTZOhwqCEly0hokzYL7UIVIixGns,3022
 small_fish_gui/gui/animation.py,sha256=rnNP5FPp06Hu-R33c4AVTCknALBbxT2YlsKFCXHAp9k,981
 small_fish_gui/gui/general_help_screenshot.png,sha256=X4E6Td5f04K-pBUPDaBJRAE3D5b8fuEdiAUKhkIDr-0,54210
 small_fish_gui/gui/help_module.py,sha256=PmgkkDs7bZ2-po83A_PK9uldQcHjehYmqre21nYb6DQ,9600
 small_fish_gui/gui/layout.py,sha256=oB8Kg6s0rCA8yB4WM8JQY8BpjoPiBqTGb6YoOKDqEA8,13855
 small_fish_gui/gui/mapping_help_screenshot.png,sha256=HcuRh5TYciUogUasza5vZ_QSshaiHsskQK23mh9vQS8,34735
-small_fish_gui/gui/
+small_fish_gui/gui/napari.py,sha256=XiahTyq7QEQAuF6EK3-e--3-A8yBPVn0oaVZZyJo0qo,8607
+small_fish_gui/gui/prompts.py,sha256=CONXMmSa0a-l93fyXAPz7h1skql0BEZtLzWJMVepPQ0,13660
 small_fish_gui/gui/segmentation_help_screenshot.png,sha256=rbSgIydT0gZtfMh1qk4mdMbEIyCaakvHmxa2eOrLwO0,118944
 small_fish_gui/interface/__init__.py,sha256=PB86R4Y9kV80aGZ-vP0ZW2KeaCwGbBbCtFCmbN2yl28,275
 small_fish_gui/interface/image.py,sha256=X1L7S5svxUwdoDcI3QM1PbN-c4Nz5w30hixq3IgqSn8,1130
-small_fish_gui/interface/output.py,sha256=
+small_fish_gui/interface/output.py,sha256=6dJuTZwFM1nZzAT8evif0tCnJZIJmtLyodQzSoA7Q_w,2060
 small_fish_gui/interface/parameters.py,sha256=lUugD-4W2TZyJF3TH1q70TlktEYhhPtcPCrvxm5Dk50,36
 small_fish_gui/interface/testing.py,sha256=MY5-GcPOUHagcrwR8A7QOjAmjZIDVC8Wz3NibLe3KQw,321
 small_fish_gui/pipeline/__init__.py,sha256=_Ey20GG8fJtqZvixbXNNYX6wTWMnCUArmARPqsNEhuQ,743
-small_fish_gui/pipeline/_colocalisation.py,sha256=
+small_fish_gui/pipeline/_colocalisation.py,sha256=vVHDOvAfqaRFUuX-8HBtDLVrXgoSeUOxa19hmm7lllo,18978
 small_fish_gui/pipeline/_custom_errors.py,sha256=tQ-AUhgzIFpK30AZiQQrtHCHyGVRDdAoIjzL0Fk-1pA,43
-small_fish_gui/pipeline/_napari_wrapper.py,sha256=42c_PvKF8D_NW_CWysS4nZ2_Qp5vP9voaAH0bFJjJpc,8099
 small_fish_gui/pipeline/_preprocess.py,sha256=ddocTXwc0vYq2VGUbWYaN9eUiHPyfiCuBpYQ2p6rQ8g,13084
-small_fish_gui/pipeline/_segmentation.py,sha256=
+small_fish_gui/pipeline/_segmentation.py,sha256=bB7U_EhebFAssyZcGimnz706aNLbajVMOUj6nbVflwA,18854
 small_fish_gui/pipeline/_signaltonoise.py,sha256=7A9t7xu7zghI6cr201Ldm-LjJ5NOuP56VSeJ8KIzcUo,8497
-small_fish_gui/pipeline/actions.py,sha256=
-small_fish_gui/pipeline/detection.py,sha256=
-small_fish_gui/pipeline/main.py,sha256=
+small_fish_gui/pipeline/actions.py,sha256=JqcEYtVf3rr-YB_C8SF9U0dpoBktjUhm_Ko0FxZbxy4,11636
+small_fish_gui/pipeline/detection.py,sha256=ORs3OR7MYIz4l1GX3Ayjzpxp2poRnTHhoicJdF7XL_E,34976
+small_fish_gui/pipeline/main.py,sha256=0DrN9dXZJTqLOD0tZaHTVFE1oolzLPU1w5LNgWC3iuU,5072
 small_fish_gui/pipeline/spots.py,sha256=yHvqf1eD25UltELpzcouYXhLkxiXI_mOL1ANSzXK5pw,1907
 small_fish_gui/pipeline/test.py,sha256=w4ZMGDmUDXxVgWTlZ2TKw19W8q5gcE9gLMKe0SWnRrw,2827
 small_fish_gui/pipeline/utils.py,sha256=run6qtqCAe_mFnE3o1CnmF1xBBmK3ydgc8-jOV9P-_w,448
-small_fish_gui-1.
-small_fish_gui-1.
-small_fish_gui-1.
-small_fish_gui-1.
+small_fish_gui-1.7.0.dist-info/METADATA,sha256=b8ALR98CI1iIaTsDQKGEQ-9Yu3hOAEekdYmOFUDzCQE,2567
+small_fish_gui-1.7.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+small_fish_gui-1.7.0.dist-info/licenses/LICENSE,sha256=-iFy8VGBYs5VsHglKpk4D-hxqQ2jMJaqmfq_ulIzDks,1303
+small_fish_gui-1.7.0.dist-info/RECORD,,

{small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/WHEEL
File without changes

{small_fish_gui-1.5.0.dist-info → small_fish_gui-1.7.0.dist-info}/licenses/LICENSE
File without changes