small-fish-gui 1.7.0-py3-none-any.whl → 1.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. small_fish_gui/__init__.py +1 -1
  2. small_fish_gui/batch/integrity.py +3 -3
  3. small_fish_gui/batch/pipeline.py +11 -6
  4. small_fish_gui/batch/prompt.py +4 -4
  5. small_fish_gui/batch/update.py +2 -2
  6. small_fish_gui/batch/utils.py +8 -8
  7. small_fish_gui/batch/values.txt +2 -2
  8. small_fish_gui/gui/help_module.py +4 -4
  9. small_fish_gui/gui/layout.py +20 -12
  10. small_fish_gui/gui/prompts.py +99 -51
  11. small_fish_gui/hints.py +54 -0
  12. small_fish_gui/interface/__init__.py +1 -1
  13. small_fish_gui/interface/image.py +2 -2
  14. small_fish_gui/interface/{output.py → inoutput.py} +41 -2
  15. small_fish_gui/interface/testing.py +10 -6
  16. small_fish_gui/pipeline/__init__.py +4 -4
  17. small_fish_gui/pipeline/_colocalisation.py +19 -14
  18. small_fish_gui/pipeline/_preprocess.py +98 -50
  19. small_fish_gui/pipeline/actions.py +142 -38
  20. small_fish_gui/pipeline/detection.py +54 -106
  21. small_fish_gui/pipeline/main.py +28 -35
  22. small_fish_gui/pipeline/{_segmentation.py → segmentation.py} +32 -14
  23. small_fish_gui/pipeline/spots.py +4 -1
  24. {small_fish_gui-1.7.0.dist-info → small_fish_gui-1.8.0.dist-info}/METADATA +1 -2
  25. small_fish_gui-1.8.0.dist-info/RECORD +50 -0
  26. {small_fish_gui-1.7.0.dist-info → small_fish_gui-1.8.0.dist-info}/WHEEL +1 -1
  27. small_fish_gui/batch/output.py +0 -0
  28. small_fish_gui/batch/values.py +0 -3
  29. small_fish_gui/docs/conf.py +0 -1
  30. small_fish_gui/interface/parameters.py +0 -2
  31. small_fish_gui-1.7.0.dist-info/RECORD +0 -53
  32. /small_fish_gui/gui/{general_help_screenshot.png → screenshot/general_help_screenshot.png} +0 -0
  33. /small_fish_gui/gui/{mapping_help_screenshot.png → screenshot/mapping_help_screenshot.png} +0 -0
  34. /small_fish_gui/gui/{segmentation_help_screenshot.png → screenshot/segmentation_help_screenshot.png} +0 -0
  35. {small_fish_gui-1.7.0.dist-info → small_fish_gui-1.8.0.dist-info}/licenses/LICENSE +0 -0
@@ -7,4 +7,4 @@ from .image import get_filename
  from .image import check_format
  from .image import FormatError

- from .output import write_results
+ from .inoutput import write_results
@@ -19,10 +19,10 @@ def open_image(full_path:str) :
  return im


- def check_format(image, is_3D, is_time_stack, is_multichannel) :
+ def check_format(image, is_3D, is_multichannel) :
      shape = list(image.shape)
      dim = image.ndim - (shape[image.ndim - 1] == 1)
-     if not dim == (2 + is_3D + is_time_stack + is_multichannel) :
+     if not dim == (2 + is_3D + is_multichannel) :
          raise FormatError("Inconsistency in image format and parameters.")


@@ -1,6 +1,8 @@
  import os
  import pandas as pd
+ import numpy as np
  from bigfish.stack import check_parameter
+ from typing import Literal

  MAX_LEN_EXCEL = 1048576

@@ -33,13 +35,13 @@ def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= Tru
      i= 1

      if not overwrite :
-         while new_filename + '.xlsx' in os.listdir(path) or new_filename + '.feather' in os.listdir(path) or new_filename + '.csv' in os.listdir(path) :
+         while new_filename + '.xlsx' in os.listdir(path) or new_filename + '.parquet' in os.listdir(path) or new_filename + '.csv' in os.listdir(path) :
              new_filename = filename + '_{0}'.format(i)
              i+=1

      COLUMNS_TO_DROP = ['image', 'spots', 'clusters', 'rna_coords', 'cluster_coords']
      for col in COLUMNS_TO_DROP :
-         dataframe = dataframe.drop(columns=col)
+         if col in dataframe.columns : dataframe = dataframe.drop(columns=col)

      if reset_index : dataframe = dataframe.reset_index(drop=True)

@@ -55,3 +57,40 @@ def write_results(dataframe: pd.DataFrame, path:str, filename:str, do_excel= Tru
      dataframe.to_parquet(path + new_filename + '.parquet')

      return True
+
+ def input_segmentation(
+         nucleus_path : str,
+         cytoplasm_path : str,
+ ) :
+     nucleus_label = np.load(nucleus_path)
+
+     if cytoplasm_path != '' :
+         cytoplasm_label = np.load(cytoplasm_path)
+     else :
+         cytoplasm_label = nucleus_label
+
+     return nucleus_label, cytoplasm_label
+
+ def output_segmentation(
+         path : str,
+         extension : Literal['npy', 'npz_uncompressed', 'npz_compressed'],
+         nucleus_label : np.ndarray,
+         cytoplasm_label : np.ndarray = None,
+ ) :
+
+     saved = False
+     if extension == 'npy' :
+         save = np.save
+     elif extension == 'npz_uncompressed' :
+         save = np.savez
+     elif extension == 'npz_compressed' :
+         save = np.savez_compressed
+
+     if type(nucleus_label) != type(None) :
+         save(path + "_nucleus_segmentation", nucleus_label)
+         saved = True
+
+     if type(cytoplasm_label) != type(None) :
+         save(path + "_cytoplasm_segmentation", cytoplasm_label)
+
+     return saved
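The new I/O helpers in small_fish_gui/interface/inoutput.py round-trip label arrays through NumPy files. A minimal usage sketch; the path and label array below are placeholders:

    import numpy as np
    from small_fish_gui.interface.inoutput import input_segmentation, output_segmentation

    nucleus = np.zeros((512, 512), dtype=int)                    # placeholder label image
    output_segmentation('/tmp/result', extension='npy', nucleus_label=nucleus)
    # np.save adds the suffix, so the labels land in /tmp/result_nucleus_segmentation.npy
    nuc, cyto = input_segmentation('/tmp/result_nucleus_segmentation.npy', '')
    # an empty cytoplasm path falls back to the nucleus labels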
@@ -1,8 +1,12 @@
- import imageio.v3 as iio
+ import PySimpleGUI as sg

- path = '/home/flo/Documents/IGH projects/SohaQuantif/SCC/input/230723 n1 b-cat bac APC IF fitc ires neo smfish cy3 without puromycin-01.tif'
+ layout = [
+     [sg.Radio(['A',], key='button', group_id=0, key='test1'),sg.Radio(['B'], key='button', group_id=0, key='test2'), sg.Radio(['C'], key='button', group_id=0, key='test3')],
+     [sg.Radio(['D',], key='button', group_id=1, key='test4')],
+     [sg.Button('Ok')]
+ ]

- props = iio.improps(path)
- meta = iio.immeta(path)
- print(props)
- print(meta['channels'], meta['slices'], meta['unit'], meta['hyperstack'], meta['spacing'])
+ window = sg.Window(title= 'test', layout=layout)
+ event, values = window.read()
+
+ print(values)
@@ -7,10 +7,10 @@ from ._preprocess import reorder_image_stack
  from ._preprocess import prepare_image_detection
  from ._preprocess import convert_parameters_types

- from ._segmentation import launch_segmentation
- from ._segmentation import _cast_segmentation_parameters
- from ._segmentation import cell_segmentation
- from ._segmentation import plot_segmentation
+ from .segmentation import launch_segmentation
+ from .segmentation import _cast_segmentation_parameters
+ from .segmentation import cell_segmentation
+ from .segmentation import plot_segmentation

  from .detection import launch_detection
  from .detection import launch_features_computation
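Because the re-exports in pipeline/__init__.py are kept, code that imports through the package namespace is unaffected by the _segmentation.py → segmentation.py rename; only direct module imports need the new name. Roughly:

    from small_fish_gui.pipeline import cell_segmentation                   # unchanged for callers
    # from small_fish_gui.pipeline._segmentation import cell_segmentation   # 1.7.0 direct module import
    from small_fish_gui.pipeline.segmentation import cell_segmentation      # 1.8.0 direct module import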
@@ -191,12 +191,15 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
      acquisition1 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id1]
      acquisition2 = result_dataframe.loc[result_dataframe['acquisition_id'] == acquisition_id2]

+     assert len(acquisition1) == 1
+     assert len(acquisition2) == 1
+
      acquisition_couple = (acquisition_id1,acquisition_id2)

-     voxel_size1 = acquisition1.at['voxel_size']
-     voxel_size2 = acquisition2.at['voxel_size']
-     shape1 = acquisition1.at['reordered_shape']
-     shape2 = acquisition2.at['reordered_shape']
+     voxel_size1 = acquisition1.iloc[0].at['voxel_size']
+     voxel_size2 = acquisition2.iloc[0].at['voxel_size']
+     shape1 = acquisition1.iloc[0].at['reordered_shape']
+     shape2 = acquisition2.iloc[0].at['reordered_shape']

      if voxel_size1 != voxel_size2 :
          raise MissMatchError("voxel size 1 different than voxel size 2")
@@ -209,8 +212,8 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
      shape = shape1


-     spots1 = acquisition1['spots']
-     spots2 = acquisition2['spots']
+     spots1 = acquisition1.iloc[0].at['spots']
+     spots2 = acquisition2.iloc[0].at['spots']

      spot1_total = len(spots1)
      spot2_total = len(spots2)
@@ -225,7 +228,7 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat

      if 'clusters' in acquisition1.index :
          try :
-             clusters1 = acquisition1['clusters'][:,:len(voxel_size)]
+             clusters1 = acquisition1.iloc[0].at['clusters'][:,:len(voxel_size)]
              fraction_spots2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2, spot_list2=clusters1, distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
          except MissMatchError as e :
              sg.popup(str(e))
@@ -238,7 +241,7 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat

      if 'clusters' in acquisition2.index :
          try :
-             clusters2 = acquisition2['clusters'][:,:len(voxel_size)]
+             clusters2 = acquisition2.iloc[0].at['clusters'][:,:len(voxel_size)]
              fraction_spots1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1, spot_list2=clusters2, distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
          except MissMatchError as e :# clusters not computed
              sg.popup(str(e))
@@ -267,8 +270,8 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat

      #Add names
      coloc_df_col = list(coloc_df.columns)
-     coloc_df['name1'] = acquisition1.at['name']
-     coloc_df['name2'] = acquisition2.at['name']
+     coloc_df['name1'] = acquisition1.iloc[0].at['name']
+     coloc_df['name2'] = acquisition2.iloc[0].at['name']
      coloc_df = coloc_df.loc[:,['name1','name2'] + coloc_df_col]

      return coloc_df
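The switch from acquisition1.at[...] to acquisition1.iloc[0].at[...] reflects that boolean .loc filtering returns a one-row DataFrame rather than a Series, so a scalar has to be read through that single row; the added asserts make the one-row assumption explicit. A standalone illustration with a toy frame holding only the relevant columns:

    import pandas as pd

    result_dataframe = pd.DataFrame({
        'acquisition_id' : [0, 1],
        'voxel_size' : [(300, 100, 100), (300, 100, 100)],
    })
    acquisition1 = result_dataframe.loc[result_dataframe['acquisition_id'] == 1]  # one-row DataFrame
    assert len(acquisition1) == 1
    voxel_size1 = acquisition1.iloc[0].at['voxel_size']  # scalar access through the first (only) row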
@@ -333,7 +336,7 @@ def _cell_coloc(
      )
      colocalisation_df[("spots_to_spots_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_spots_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)

-     if acquisition2['Cluster computation'].iat[0] :
+     if acquisition2['do_cluster_computation'].iat[0] :
          if len(acquisition2['clusters'].iat[0]) > 0 :

              #spots to clusters
@@ -347,7 +350,7 @@ def _cell_coloc(
              )
              colocalisation_df[("spots_to_clusters_fraction",coloc_name,"forward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)

-     if acquisition1['Cluster computation'].iat[0] :
+     if acquisition1['do_cluster_computation'].iat[0] :
          if len(acquisition1['clusters'].iat[0]) > 0 :
              colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")] = colocalisation_df.apply(
                  lambda x: spots_colocalisation(
@@ -359,7 +362,7 @@ def _cell_coloc(
              )
              colocalisation_df[("spots_to_clusters_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)

-     if acquisition2['Cluster computation'].iat[0] and acquisition1['Cluster computation'].iat[0] :
+     if acquisition2['do_cluster_computation'].iat[0] and acquisition1['do_cluster_computation'].iat[0] :
          if len(acquisition1['clusters'].iat[0]) > 0 and len(acquisition2['clusters'].iat[0]) > 0 :
              #clusters to clusters
              colocalisation_df[("clusters_to_clusters_count",coloc_name,"forward")] = colocalisation_df.apply(
@@ -397,7 +400,7 @@ def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe

      acquisition_id1, acquisition_id2 = (acquisition1.at['acquisition_id'], acquisition2.at['acquisition_id'])

-     if acquisition_id1 in cell_result_dataframe['acquisition_id'] and acquisition_id2 in cell_result_dataframe['acquisition_id'] :
+     if acquisition_id1 in list(cell_result_dataframe['acquisition_id']) and acquisition_id2 in list(cell_result_dataframe['acquisition_id']) :
          print("Launching cell to cell colocalisation.")
          new_coloc = _cell_coloc(
              acquisition_id1 = acquisition_id1,
@@ -411,6 +414,8 @@ def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe
              new_coloc,
          ], axis=1).sort_index(axis=1, level=0)

+         cell_coloc_df.index = cell_coloc_df.index.rename('cell_id')
+


      else :
@@ -2,6 +2,7 @@ import numpy as np
  import os
  import PySimpleGUI as sg
  from ..gui import _error_popup, _warning_popup, parameters_layout, add_header, prompt, prompt_with_help
+ from ..gui.prompts import input_image_prompt

  class ParameterInputError(Exception) :
      """
@@ -13,24 +14,24 @@ class MappingError(ValueError) :
      """
      Raised when user inputs an incorrect image mapping.
      """
-     def __init__(self, map ,*args: object) -> None:
+     def __init__(self, map_ ,*args: object) -> None:
          super().__init__(*args)
-         self.map = map
+         self.map_ = map_

      def get_map(self) :
-         return self.map
+         return self.map_

- def prepare_image_detection(map, user_parameters) :
+ def prepare_image_detection(map_, user_parameters) :
      """
      Return monochannel image for ready for spot detection;
      if image is already monochannel, nothing happens.
      else : image is the image on which detection is performed, other_image are the other layer to show in Napari Viewer.
      """
-     image = reorder_image_stack(map, user_parameters)
+     image = reorder_image_stack(map_, user_parameters['image'])
      assert len(image.shape) != 5 , "Time stack not supported, should never be True"

-     if user_parameters['multichannel'] :
-         channel_to_compute = user_parameters['channel to compute']
+     if user_parameters['is_multichannel'] :
+         channel_to_compute = user_parameters['channel_to_compute']
          other_image = image.copy()
          other_image = np.delete(other_image, channel_to_compute, axis=0)
          other_image = [layer for layer in other_image]
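prepare_image_detection now hands the raw image array to reorder_image_stack instead of the whole user_parameters dict (the matching signature change appears in the next hunk). A hedged sketch of the new call, with a made-up (c, z, y, x) stack and axis mapping:

    import numpy as np
    from small_fish_gui.pipeline import reorder_image_stack

    image_stack = np.zeros((3, 20, 512, 512))        # hypothetical channel, z, y, x acquisition
    map_ = {'c' : 0, 'z' : 1, 'y' : 2, 'x' : 3}      # axis index of each dimension
    image = reorder_image_stack(map_, image_stack)   # 1.7.0 expected (map, user_parameters) instead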
@@ -41,13 +42,12 @@ def prepare_image_detection(map, user_parameters) :
      return image, other_image


- def reorder_image_stack(map, user_parameters) :
-     image_stack = user_parameters['image']
-     x = (int(map['x']),)
-     y = (int(map['y']),)
-     z = (int(map['z']),) if type(map.get('z')) != type(None) else ()
-     c = (int(map['c']),) if type(map.get('c')) != type(None) else ()
-     t = (int(map['t']),) if type(map.get('t')) != type(None) else ()
+ def reorder_image_stack(map_, image_stack) :
+     x = (int(map_['x']),)
+     y = (int(map_['y']),)
+     z = (int(map_['z']),) if type(map_.get('z')) != type(None) else ()
+     c = (int(map_['c']),) if type(map_.get('c')) != type(None) else ()
+     t = (int(map_['t']),) if type(map_.get('t')) != type(None) else ()

      source = t+c+z+y+x

@@ -62,20 +62,20 @@ def reorder_image_stack(map, user_parameters) :
  def map_channels(user_parameters) :

      image = user_parameters['image']
-     is_3D_stack = user_parameters['3D stack']
-     is_time_stack = user_parameters['time stack']
-     multichannel = user_parameters['multichannel']
+     is_3D_stack = user_parameters['is_3D_stack']
+     is_time_stack = False
+     multichannel = user_parameters['is_multichannel']

      try :
-         map = _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image=image)
+         map_ = _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image=image)
      except MappingError as e :
          sg.popup("Automatic dimension mapping went wrong. Please indicate dimensions positions in the array.")
-         map = _ask_channel_map(image.shape, is_3D_stack, is_time_stack, multichannel, preset_map= e.get_map())
+         map_ = _ask_channel_map(image.shape, is_3D_stack, is_time_stack, multichannel, preset_map= e.get_map())

      else :
-         map = _show_mapping(image.shape, map, is_3D_stack, is_time_stack, multichannel,)
+         map_ = _show_mapping(image.shape, map_, is_3D_stack, is_time_stack, multichannel,)

-     return map
+     return map_

  def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarray=None, shape=None) :
      if type(shape) == type(None) :
@@ -85,7 +85,7 @@ def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarr
      #Set the biggest dimension to y
      y_val = max(reducing_list)
      y_idx = shape.index(y_val)
-     map = {'y' : y_idx}
+     map_ = {'y' : y_idx}

      #2nd biggest set to x
      reducing_list[y_idx] = -1
@@ -93,7 +93,7 @@ def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarr
      x_idx = reducing_list.index(x_val)
      reducing_list[y_idx] = y_val

-     map['x'] = x_idx
+     map_['x'] = x_idx
      reducing_list.remove(y_val)
      reducing_list.remove(x_val)

@@ -101,26 +101,26 @@ def _auto_map_channels(is_3D_stack, is_time_stack, multichannel, image: np.ndarr
      if multichannel :
          c_val = min(reducing_list)
          c_idx = shape.index(c_val)
-         map['c'] = c_idx
+         map_['c'] = c_idx
          reducing_list.remove(c_val)

      if is_time_stack :
          t_val = reducing_list[0]
          t_idx = shape.index(t_val)
-         map['t'] = t_idx
+         map_['t'] = t_idx
          reducing_list.remove(t_val)

      if is_3D_stack :
          z_val = reducing_list[0]
          z_idx = shape.index(z_val)
-         map['z'] = z_idx
+         map_['z'] = z_idx

-     total_channels = len(map)
-     unique_channel = len(np.unique(list(map.values())))
+     total_channels = len(map_)
+     unique_channel = len(np.unique(list(map_.values())))

-     if total_channels != unique_channel : raise MappingError(map,"{0} channel(s) are not uniquely mapped.".format(total_channels - unique_channel))
+     if total_channels != unique_channel : raise MappingError(map_,"{0} channel(s) are not uniquely mapped.".format(total_channels - unique_channel))

-     return map
+     return map_

  def _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map: dict= {}) :
      while True :
@@ -158,13 +158,13 @@ def _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map

      return preset_map

- def _show_mapping(shape, map, is_3D_stack, is_time_stack, multichannel) :
+ def _show_mapping(shape, map_, is_3D_stack, is_time_stack, multichannel) :
      while True :
          layout = [
              [sg.Text("Image shape : {0}".format(shape))],
              [sg.Text('Dimensions mapping was set to :')],
              [sg.Text('x : {0} \ny : {1} \nz : {2} \nc : {3} \nt : {4}'.format(
-                 map['x'], map['y'], map.get('z'), map.get("c"), map.get('t')
+                 map_['x'], map_['y'], map_.get('z'), map_.get("c"), map_.get('t')
              ))],
              [sg.Button('Change mapping')]
          ]
@@ -172,9 +172,9 @@ def _show_mapping(shape, map, is_3D_stack, is_time_stack, multichannel) :
          event, values = prompt_with_help(layout, help='mapping', add_scrollbar=False)

          if event == 'Ok' :
-             return map
+             return map_
          elif event == 'Change mapping':
-             map = _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map=map)
+             map_ = _ask_channel_map(shape, is_3D_stack, is_time_stack, multichannel, preset_map=map_)
          elif event == 'Cancel' :
              return None
          else : raise AssertionError('Unforseen event')
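The systematic map → map_ rename in this module avoids shadowing Python's builtin map() inside these helpers. A small illustration of the pitfall being removed:

    map_ = {'x' : 3, 'y' : 2}          # trailing underscore keeps the builtin reachable
    print(list(map(str, [1, 2])))      # ['1', '2'] -- still the builtin
    # map = {'x' : 3, 'y' : 2}         # this spelling would shadow the builtin in the local scope
    # list(map(str, [1, 2]))           # ...and then raise TypeError: 'dict' object is not callable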
@@ -194,12 +194,12 @@ def convert_parameters_types(values:dict) :
      for tuple_parameter in tuples_list :
          try :
              tuple_values = tuple([float(values.get(tuple_parameter + '_{0}'.format(dimension))) for dimension in dim_tuple])
-         except Exception : #execption when str cannot be converted to float or no parameter was given.
+         except Exception as e : #execption when str cannot be converted to float or no parameter was given.
              values[tuple_parameter] = None
          else : values[tuple_parameter] = tuple_values

      #Parameters
-     int_list = ['threshold', 'channel_to_compute', 'channel to compute', 'min number of spots', 'cluster size','nucleus channel signal']
+     int_list = ['threshold', 'channel_to_compute', 'channel_to_compute', 'min number of spots', 'cluster size','nucleus channel signal']
      float_list = ['alpha', 'beta', 'gamma', 'threshold penalty']

      for parameter in int_list :
@@ -224,7 +224,7 @@ def check_integrity(
          do_clustering,
          multichannel,
          segmentation_done,
-         map,
+         map_,
          shape
  ):
      """
@@ -232,7 +232,9 @@ def check_integrity(
      """

      #voxel_size
-     if type(values['voxel_size']) == type(None) : raise ParameterInputError('Incorrect voxel size parameter.')
+     if type(values['voxel_size']) == type(None) :
+         print(values['voxel_size'])
+         raise ParameterInputError('Incorrect voxel size parameter.')

      #detection integrity :
      if not isinstance(values['spot_size'], (tuple, list)) and not(isinstance(values['minimum_distance'], (tuple, list)) and isinstance(values['log_kernel_size'], (tuple, list))) :
@@ -257,7 +259,7 @@ def check_integrity(

      #channel
      if multichannel :
-         ch_len = shape[int(map['c'])]
+         ch_len = shape[int(map_['c'])]

          if type(segmentation_done) == type(None) :
              pass
@@ -270,12 +272,12 @@ def check_integrity(
              values['nucleus channel signal'] = nuc_signal_ch

          try :
-             ch = int(values['channel to compute'])
+             ch = int(values['channel_to_compute'])
          except Exception :
-             raise ParameterInputError("Incorrect channel to compute parameter.")
+             raise ParameterInputError("Incorrect channel_to_compute parameter.")
          if ch >= ch_len :
-             raise ParameterInputError("Channel to compute is out of range for image.\nPlease select from {0}".format(list(range(ch_len))))
-         values['channel to compute'] = ch
+             raise ParameterInputError("channel_to_compute is out of range for image.\nPlease select from {0}".format(list(range(ch_len))))
+         values['channel_to_compute'] = ch

      #Spot extraction
      if not os.path.isdir(values['spots_extraction_folder']) and values['spots_extraction_folder'] != '':
@@ -284,12 +286,12 @@ def check_integrity(

      return values

- def reorder_shape(shape, map) :
-     x = [int(map['x']),]
-     y = [int(map['y']),]
-     z = [int(map['z']),] if type(map.get('z')) != type(None) else []
-     c = [int(map['c']),] if type(map.get('c')) != type(None) else []
-     t = [int(map['t']),] if type(map.get('t')) != type(None) else []
+ def reorder_shape(shape, map_) :
+     x = [int(map_['x']),]
+     y = [int(map_['y']),]
+     z = [int(map_['z']),] if type(map_.get('z')) != type(None) else []
+     c = [int(map_['c']),] if type(map_.get('c')) != type(None) else []
+     t = [int(map_['t']),] if type(map_.get('t')) != type(None) else []

      source = t + c + z + y + x

@@ -343,4 +345,50 @@ def clean_unused_parameters_cache(user_parameters: dict) :
          if type(user_parameters[parameter]) == type(None) :
              del user_parameters[parameter]

-     return user_parameters
+     return user_parameters
+
+ def ask_input_parameters(ask_for_segmentation=True) :
+     """
+     Prompt user with interface allowing parameters setting for bigFish detection / deconvolution.
+     """
+
+     values = {}
+     image_input_values = {}
+     while True :
+         is_3D_preset = image_input_values.setdefault('is_3D_stack', False)
+         is_time_preset = image_input_values.setdefault('time stack', False)
+         is_multichannel_preset = image_input_values.setdefault('is_multichannel', False)
+         denseregion_preset = image_input_values.setdefault('do_dense_regions_deconvolution', False)
+         do_clustering_preset = image_input_values.setdefault('do_cluster_computation', False)
+         do_napari_preset = image_input_values.setdefault('show_napari_corrector', False)
+
+         if ask_for_segmentation :
+             image_input_values = input_image_prompt(
+                 is_3D_stack_preset=is_3D_preset,
+                 multichannel_preset=is_multichannel_preset,
+                 do_dense_regions_deconvolution_preset=None,
+                 do_clustering_preset= None,
+                 do_Napari_correction=None,
+             )
+         else :
+             image_input_values = input_image_prompt(
+                 is_3D_stack_preset=is_3D_preset,
+                 multichannel_preset=is_multichannel_preset,
+                 do_dense_regions_deconvolution_preset=denseregion_preset,
+                 do_clustering_preset= do_clustering_preset,
+                 do_Napari_correction=do_napari_preset,
+             )
+
+         if type(image_input_values) == type(None) :
+             return image_input_values
+
+         if 'image' in image_input_values.keys() :
+             image_input_values['shape'] = image_input_values['image'].shape
+             break
+
+
+     values.update(image_input_values)
+     values['dim'] = 3 if values['is_3D_stack'] else 2
+     values['filename'] = os.path.basename(values['image path'])
+
+     return values
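A sketch of how the new ask_input_parameters helper would be called, assuming these hunks live in pipeline/_preprocess.py as the import of input_image_prompt suggests; it opens the interactive input_image_prompt window, so it only makes sense in a GUI session:

    from small_fish_gui.pipeline._preprocess import ask_input_parameters

    values = ask_input_parameters(ask_for_segmentation=False)
    if values is None :                  # the user cancelled the image prompt
        print("No acquisition selected.")
    else :
        print(values['filename'], values['dim'], values['shape'])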