small_fish_gui-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,640 @@
+ """
+ Contains code to handle detection as well as bigfish wrappers related to spot detection.
+ """
+
+ from ._preprocess import ParameterInputError
+ from ._preprocess import check_integrity, convert_parameters_types
+ from ._signaltonoise import compute_snr_spots
+ from ._detection_visualisation import correct_spots, _update_clusters
+ from ..gui import add_default_loading
+ from ..gui import detection_parameters_promt, input_image_prompt
+
+ import numpy as np
+ import pandas as pd
+ import PySimpleGUI as sg
+ import os
+ from numpy import nan
+ import bigfish.detection as detection
+ import bigfish.stack as stack
+ import bigfish.multistack as multistack
+ import bigfish.classification as classification
+ from bigfish.detection.spot_detection import get_object_radius_pixel
+ from types import GeneratorType
+
+
+ def ask_input_parameters(ask_for_segmentation=True) :
+     """
+     Prompt the user with an interface allowing parameter setting for bigFish detection / deconvolution.
+
+     Keys :
+         - 'image path'
+         - '3D stack'
+         - 'time stack'
+         - 'multichannel'
+         - 'Dense regions deconvolution'
+         - 'Segmentation'
+         - 'Napari correction'
+         - 'threshold'
+         - 'time step'
+         - 'channel to compute'
+         - 'alpha'
+         - 'beta'
+         - 'gamma'
+         - 'voxel_size_{(z,y,x)}'
+         - 'spot_size{(z,y,x)}'
+         - 'log_kernel_size{(z,y,x)}'
+         - 'minimum_distance{(z,y,x)}'
+     """
+
+     values = {}
+     image_input_values = {}
+     while True :
+         is_3D_preset = image_input_values.setdefault('3D stack', False)
+         is_time_preset = image_input_values.setdefault('time stack', False)
+         is_multichannel_preset = image_input_values.setdefault('multichannel', False)
+         denseregion_preset = image_input_values.setdefault('Dense regions deconvolution', False)
+         do_clustering_preset = image_input_values.setdefault('Cluster computation', False)
+         do_segmentation_preset = image_input_values.setdefault('Segmentation', False)
+         do_napari_preset = image_input_values.setdefault('Napari correction', False)
+
+         image_input_values = input_image_prompt(
+             is_3D_stack_preset=is_3D_preset,
+             time_stack_preset=is_time_preset,
+             multichannel_preset=is_multichannel_preset,
+             do_dense_regions_deconvolution_preset=denseregion_preset,
+             do_clustering_preset=do_clustering_preset,
+             do_segmentation_preset=do_segmentation_preset,
+             do_Napari_correction=do_napari_preset,
+             ask_for_segmentation=ask_for_segmentation
+         )
+         if image_input_values is None :
+             return image_input_values
+
+         if 'image' in image_input_values.keys() :
+             image_input_values['shape'] = image_input_values['image'].shape
+             break
+
+
+     values.update(image_input_values)
+     values['dim'] = 3 if values['3D stack'] else 2
+     values['filename'] = os.path.basename(values['image path'])
+     if values['Segmentation'] and values['time stack'] :
+         sg.popup('Segmentation is not supported for time stack. Segmentation will be turned off.')
+         values['Segmentation'] = False # disable it, as announced in the popup above
+
+     return values
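+
+ # Example (hedged sketch) : reading back a few of the keys listed above once the
+ # user validates the prompt ; the function returns None when the window is closed.
+ #     values = ask_input_parameters()
+ #     if values is not None :
+ #         print(values['filename'], values['dim'], values['shape'])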
+
+
+ def compute_auto_threshold(images, voxel_size=None, spot_radius=None, log_kernel_size=None, minimum_distance=None, im_number=15, crop_zstack=None) :
+     """
+     Compute the bigfish auto threshold efficiently for a list of images. For a large set of images, the user can set `im_number` so that only a random subset of `im_number` images is considered for the threshold computation.
+     """
+     # check parameters
+     stack.check_parameter(images=(list, np.ndarray, GeneratorType,), voxel_size=(int, float, tuple, list, type(None)), spot_radius=(int, float, tuple, list, type(None)), log_kernel_size=(int, float, tuple, list, type(None)), minimum_distance=(int, float, tuple, list, type(None)), im_number=int, crop_zstack=(type(None), tuple))
+
+     # if one image is provided we enlist it
+     if not isinstance(images, list):
+         if isinstance(images, np.ndarray) :
+             stack.check_array(images, ndim=[2, 3], dtype=[np.uint8, np.uint16, np.float32, np.float64])
+             ndim = images.ndim
+             images = [images]
+         else :
+             images = [image for image in images]
+             for image in images : stack.check_array(image, ndim=[2, 3], dtype=[np.uint8, np.uint16, np.float32, np.float64])
+             ndim = images[0].ndim
+
+     else:
+         ndim = None
+         for i, image in enumerate(images):
+             stack.check_array(image, ndim=[2, 3], dtype=[np.uint8, np.uint16, np.float32, np.float64])
+             if i == 0:
+                 ndim = image.ndim
+             else:
+                 if ndim != image.ndim:
+                     raise ValueError("Provided images should have the same "
+                                      "number of dimensions.")
+     if len(images) > im_number : # select a random sample of im_number images
+         idx = np.arange(len(images), dtype=int)
+         np.random.shuffle(idx)
+         images = [images[i] for i in idx[:im_number]]
+
+     # Build one large array containing every image so we can reuse
+     # detection.automated_threshold_setting, which takes a single array rather
+     # than a list and does not use spatial information.
+     if crop_zstack is None :
+         crop_zstack = (0, len(images[0]))
+
+     log_kernel_size, minimum_distance = _compute_threshold_parameters(ndim, voxel_size, spot_radius, minimum_distance, log_kernel_size)
+     images_filtered = np.concatenate(
+         [stack.log_filter(image[crop_zstack[0]: crop_zstack[1]], sigma=log_kernel_size) for image in images],
+         axis=ndim - 1)
+     max_masks = np.concatenate(
+         [detection.local_maximum_detection(image[crop_zstack[0]: crop_zstack[1]], min_distance=minimum_distance) for image in images],
+         axis=ndim - 1)
+     threshold = detection.automated_threshold_setting(images_filtered, max_masks)
+
+     return threshold
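+
+ # Example (hedged sketch) : one threshold computed over several fields of view ;
+ # `paths` is hypothetical and the nanometer values are illustrative.
+ #     imgs = [stack.read_image(path) for path in paths]
+ #     threshold = compute_auto_threshold(imgs, voxel_size=(300, 103, 103), spot_radius=(350, 150, 150))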
+
+ def _compute_threshold_parameters(ndim, voxel_size, spot_radius, minimum_distance, log_kernel_size) :
+
+     # check consistency between parameters - detection with voxel size and
+     # spot radius
+     if (voxel_size is not None and spot_radius is not None
+             and log_kernel_size is None and minimum_distance is None):
+         if isinstance(voxel_size, (tuple, list)):
+             if len(voxel_size) != ndim:
+                 raise ValueError("'voxel_size' must be a scalar or a sequence "
+                                  "with {0} elements.".format(ndim))
+         else:
+             voxel_size = (voxel_size,) * ndim
+         if isinstance(spot_radius, (tuple, list)):
+             if len(spot_radius) != ndim:
+                 raise ValueError("'spot_radius' must be a scalar or a "
+                                  "sequence with {0} elements.".format(ndim))
+         else:
+             spot_radius = (spot_radius,) * ndim
+         log_kernel_size = get_object_radius_pixel(
+             voxel_size_nm=voxel_size,
+             object_radius_nm=spot_radius,
+             ndim=ndim)
+         minimum_distance = get_object_radius_pixel(
+             voxel_size_nm=voxel_size,
+             object_radius_nm=spot_radius,
+             ndim=ndim)
+
+     # check consistency between parameters - detection with kernel size and
+     # minimal distance
+     elif (voxel_size is None and spot_radius is None
+           and log_kernel_size is not None and minimum_distance is not None):
+         if isinstance(log_kernel_size, (tuple, list)):
+             if len(log_kernel_size) != ndim:
+                 raise ValueError("'log_kernel_size' must be a scalar or a "
+                                  "sequence with {0} elements.".format(ndim))
+         else:
+             log_kernel_size = (log_kernel_size,) * ndim
+         if isinstance(minimum_distance, (tuple, list)):
+             if len(minimum_distance) != ndim:
+                 raise ValueError("'minimum_distance' must be a scalar or a "
+                                  "sequence with {0} elements.".format(ndim))
+         else:
+             minimum_distance = (minimum_distance,) * ndim
+
+     # check consistency between parameters - detection in priority with kernel
+     # size and minimal distance
+     elif (voxel_size is not None and spot_radius is not None
+           and log_kernel_size is not None and minimum_distance is not None):
+         if isinstance(log_kernel_size, (tuple, list)):
+             if len(log_kernel_size) != ndim:
+                 raise ValueError("'log_kernel_size' must be a scalar or a "
+                                  "sequence with {0} elements.".format(ndim))
+         else:
+             log_kernel_size = (log_kernel_size,) * ndim
+         if isinstance(minimum_distance, (tuple, list)):
+             if len(minimum_distance) != ndim:
+                 raise ValueError("'minimum_distance' must be a scalar or a "
+                                  "sequence with {0} elements.".format(ndim))
+         else:
+             minimum_distance = (minimum_distance,) * ndim
+
+     # missing parameters
+     else:
+         raise ValueError("One of the two pairs of parameters ('voxel_size', "
+                          "'spot_radius') or ('log_kernel_size', "
+                          "'minimum_distance') should be provided.")
+
+     return log_kernel_size, minimum_distance
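+
+ # The helper above accepts exactly one of the two parameter pairs ; hedged
+ # examples (nanometers for the first pair, pixels for the second) :
+ #     _compute_threshold_parameters(3, voxel_size=(300, 103, 103), spot_radius=(350, 150, 150), minimum_distance=None, log_kernel_size=None)
+ #     _compute_threshold_parameters(3, voxel_size=None, spot_radius=None, minimum_distance=(2, 2, 2), log_kernel_size=(1.5, 1.2, 1.2))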
+
+ def cluster_detection(spots, voxel_size, radius=350, nb_min_spots=4, keys_to_compute=["clustered_spots", "clusters"]) :
+     """
+     Performs `bigfish.detection.cluster_detection()` to detect clusters.
+     Then offers the possibility to get the results sorted in pandas DataFrames.
+
+     Parameters
+     ----------
+     spots : np.ndarray
+         Coordinates of the detected spots with shape (nb_spots, 3) or (nb_spots, 2).
+     voxel_size : int, float, Tuple(int, float) or List(int, float)
+         Size of a voxel, in nanometer. One value per spatial dimension (zyx or yx dimensions). If it's a scalar, the same value is applied to every dimension.
+     radius : int
+         The maximum distance between two samples for one to be considered as in the neighborhood of the other. Radius expressed in nanometer.
+     nb_min_spots : int
+         The number of spots in a neighborhood for a point to be considered as a core point (from which a cluster is expanded). This includes the point itself.
+     keys_to_compute : list[str], str
+         keys from (clustered_spots, clusters, clustered_spots_dataframe, clusters_dataframe)
+         --> clustered_spots : np.ndarray
+             Coordinates of the detected spots with shape (nb_spots, 4) or (nb_spots, 3). One coordinate per dimension (zyx or yx coordinates) plus the index of the cluster assigned to the spot. If no cluster was assigned, value is -1.
+         --> clusters : np.ndarray
+             Array with shape (nb_clusters, 5) or (nb_clusters, 4). One coordinate per dimension for the clusters centroid (zyx or yx coordinates), the number of spots detected in the clusters and its index.
+         --> clustered_spots_dataframe : pd.DataFrame
+             Same information as `clustered_spots`, sorted in a DataFrame.
+         --> clusters_dataframe : pd.DataFrame
+             Same information as `clusters`, sorted in a DataFrame.
+
+     Returns
+     -------
+     res : dict
+         keys : keys from `keys_to_compute` argument : (clustered_spots, clusters, clustered_spots_dataframe, clusters_dataframe)
+     """
+
+     if isinstance(keys_to_compute, str) : keys_to_compute = [keys_to_compute]
+     elif isinstance(keys_to_compute, list) : pass
+     else : raise TypeError("Wrong type for keys_to_compute. Should be list[str] or str. It is {0}".format(type(keys_to_compute)))
+     if len(spots) == 0 :
+         res = {'clustered_spots' : [], 'clusters' : [], 'clustered_spots_dataframe' : pd.DataFrame(columns= ["id", "cluster_id", "z", "y", "x"]), 'clusters_dataframe' : pd.DataFrame(columns= ["id", "z", "y", "x", "spot_number"])}
+         return {key : res[key] for key in keys_to_compute}
+     else : res = {}
+     if isinstance(voxel_size, (int, float)) : # scalar : apply the same value to every dimension, as documented above
+         voxel_size = (int(voxel_size),) * len(spots[0])
+     else :
+         voxel_size = tuple([int(d) for d in voxel_size])
+     clustered_spots, clusters = detection.detect_clusters(spots, voxel_size=voxel_size, radius=radius, nb_min_spots=nb_min_spots)
+
+
+     if 'clustered_spots' in keys_to_compute :
+         res['clustered_spots'] = clustered_spots
+
+     if 'clusters' in keys_to_compute :
+         res['clusters'] = clusters
+
+     if 'clustered_spots_dataframe' in keys_to_compute :
+         res['clustered_spots_dataframe'] = _compute_clustered_spots_dataframe(clustered_spots)
+
+     if 'clusters_dataframe' in keys_to_compute :
+         res['clusters_dataframe'] = _compute_cluster_dataframe(clusters)
+
+     return res
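+
+ # Example (hedged sketch ; `spots` would come from detection.detect_spots) :
+ #     res = cluster_detection(spots, voxel_size=(300, 103, 103), radius=350, nb_min_spots=4,
+ #                             keys_to_compute=['clusters', 'clusters_dataframe'])
+ #     clusters_df = res['clusters_dataframe']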
+
+ def initiate_detection(user_parameters, segmentation_done, map, shape) :
+     is_3D_stack = user_parameters['3D stack']
+     is_multichannel = user_parameters['multichannel']
+     do_dense_region_deconvolution = user_parameters['Dense regions deconvolution']
+     do_clustering = user_parameters['Cluster computation']
+     do_segmentation = user_parameters['Segmentation']
+
+     while True :
+         user_parameters = detection_parameters_promt(
+             is_3D_stack=is_3D_stack,
+             is_multichannel=is_multichannel,
+             do_dense_region_deconvolution=do_dense_region_deconvolution,
+             do_clustering=do_clustering,
+             do_segmentation=do_segmentation,
+             segmentation_done=segmentation_done,
+             default_dict=user_parameters
+         )
+
+         if user_parameters is None : return None
+         try :
+             user_parameters = convert_parameters_types(user_parameters)
+             user_parameters = check_integrity(user_parameters, do_dense_region_deconvolution, is_multichannel, segmentation_done, map, shape)
+         except ParameterInputError as error:
+             sg.popup(error)
+         else :
+             break
+     return user_parameters
+
+ @add_default_loading
+ def _launch_detection(image, image_input_values: dict, time_stack_gen=None) :
+
+     """
+     Performs spot detection.
+     """
+
+     # Extract parameters
+     voxel_size = image_input_values['voxel_size']
+     threshold = image_input_values.get('threshold')
+     threshold_penalty = image_input_values.setdefault('threshold penalty', 1)
+     spot_size = image_input_values.get('spot_size')
+     log_kernel_size = image_input_values.get('log_kernel_size')
+     minimum_distance = image_input_values.get('minimum_distance')
+
+     if threshold is None :
+         # detection : compute the automatic threshold, scaled by the penalty
+         if time_stack_gen is not None :
+             image_sample = time_stack_gen()
+         else :
+             image_sample = image
+
+         threshold = compute_auto_threshold(image_sample, voxel_size=voxel_size, spot_radius=spot_size) * threshold_penalty
+
+     spots = detection.detect_spots(
+         images=image,
+         threshold=threshold,
+         return_threshold=False,
+         voxel_size=voxel_size,
+         spot_radius=spot_size,
+         log_kernel_size=log_kernel_size,
+         minimum_distance=minimum_distance
+     )
+
+     return spots, threshold
+
+ @add_default_loading
+ def launch_dense_region_deconvolution(image, spots, image_input_values: dict,) :
+     """
+     Performs spot decomposition in dense regions.
+
+     Returns
+     -------
+     spots : np.ndarray
+         Array(nb_spot, dim) (dim is either 3 or 2)
+     """
+
+     # Extract parameters
+     voxel_size = image_input_values['voxel_size']
+     spot_size = image_input_values.get('spot_size')
+     # deconvolution parameters
+     alpha = image_input_values.get('alpha')
+     beta = image_input_values.get('beta')
+     gamma = image_input_values.get('gamma')
+     deconvolution_kernel = image_input_values.get('deconvolution_kernel')
+     dim = image_input_values['dim']
+
+     spots, dense_regions, ref_spot = detection.decompose_dense(image=image, spots=spots, voxel_size=voxel_size, spot_radius=spot_size, kernel_size=deconvolution_kernel, alpha=alpha, beta=beta, gamma=gamma)
+     del dense_regions, ref_spot
+
+     return spots
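+
+ # Example (hedged sketch) : parameter dict expected by the function above, using
+ # bigfish's default alpha/beta/gamma values ; every number is illustrative.
+ #     params = {'voxel_size': (300, 103, 103), 'spot_size': (350, 150, 150), 'dim': 3,
+ #               'alpha': 0.5, 'beta': 1, 'gamma': 5, 'deconvolution_kernel': None}
+ #     spots = launch_dense_region_deconvolution(image, spots, params)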
+
+ @add_default_loading
+ def launch_post_detection(image, spots, image_input_values: dict,) :
+     """
+     Computes field-of-view level features from the detected spots.
+
+     Returns
+     -------
+     spots : np.ndarray
+     fov_res : dict
+         keys : spot_number, spotsSignal_median, spotsSignal_mean, spotsSignal_std, median_pixel, mean_pixel, snr_median, snr_mean, snr_std, cell_medianbackground_mean, cell_medianbackground_std, cell_meanbackground_mean, cell_meanbackground_std, cell_stdbackground_mean, cell_stdbackground_std
+     """
+     fov_res = {}
+     dim = image_input_values['dim']
+     voxel_size = image_input_values['voxel_size']
+     spot_size = image_input_values.get('spot_size')
+
+     # features
+     fov_res['spot_number'] = len(spots)
+     snr_res = compute_snr_spots(image, spots, voxel_size, spot_size)
+
+     if dim == 3 :
+         Z, Y, X = list(zip(*spots))
+         spots_values = image[Z, Y, X]
+     else :
+         Y, X = list(zip(*spots))
+         spots_values = image[Y, X]
+
+     fov_res['spotsSignal_median'], fov_res['spotsSignal_mean'], fov_res['spotsSignal_std'] = np.median(spots_values), np.mean(spots_values), np.std(spots_values)
+     fov_res['median_pixel'] = np.median(image)
+     fov_res['mean_pixel'] = np.mean(image)
+
+     # appending results
+     fov_res.update(snr_res)
+
+     return spots, fov_res
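+
+ # Note : the unpacking above relies on numpy fancy indexing ; for 3D spots,
+ # image[Z, Y, X] reads the pixel intensity under every spot in one call.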
+
+ @add_default_loading
+ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signal, cell_label, nucleus_label, user_parameters) :
+
+     # Extract parameters
+     dim = user_parameters['dim']
+     do_clustering = user_parameters['Cluster computation']
+     voxel_size = user_parameters['voxel_size']
+
+     if do_clustering : other_coords = {'clusters_coords' : clusters} if len(clusters) > 0 else None
+     else : other_coords = None
+     if do_clustering : do_clustering = len(clusters) > 0
+
+     if image.ndim == 3 :
+         image = stack.maximum_projection(image)
+     if nucleus_signal.ndim == 3 :
+         nucleus_signal = stack.maximum_projection(nucleus_signal)
+
+     cells_results = multistack.extract_cell(
+         cell_label=cell_label,
+         ndim=dim,
+         nuc_label=nucleus_label,
+         rna_coord=spots,
+         others_coord=other_coords,
+         image=image
+     )
+
+     # BigFish features
+     features_names = ['acquisition_id', 'cell_id', 'cell_bbox'] + classification.get_features_name(
+         names_features_centrosome=False,
+         names_features_area=True,
+         names_features_dispersion=True,
+         names_features_distance=True,
+         names_features_foci=do_clustering,
+         names_features_intranuclear=True,
+         names_features_protrusion=False,
+         names_features_topography=True
+     )
+
+     # Nucleus features : area is computed in bigfish
+     features_names += ['nucleus_mean_signal', 'nucleus_median_signal', 'nucleus_max_signal', 'nucleus_min_signal']
+
+     result_frame = pd.DataFrame()
+
+     for cell in cells_results :
+
+         # Extract cell results
+         cell_id = cell['cell_id']
+         cell_mask = cell['cell_mask']
+         nuc_mask = cell['nuc_mask']
+         cell_bbox = cell['bbox'] # (min_y, min_x, max_y, max_x)
+         min_y, min_x, max_y, max_x = cell_bbox
+         nuc_signal = nucleus_signal[min_y:max_y, min_x:max_x]
+         rna_coords = cell['rna_coord']
+         foci_coords = cell.get('clusters_coords')
+         signal = cell['image']
+
+         with np.errstate(divide='ignore', invalid='ignore') :
+             features = classification.compute_features(
+                 cell_mask=cell_mask,
+                 nuc_mask=nuc_mask,
+                 ndim=dim,
+                 rna_coord=rna_coords,
+                 foci_coord=foci_coords,
+                 voxel_size_yx=float(voxel_size[-1]),
+                 smfish=signal,
+                 centrosome_coord=None,
+                 compute_centrosome=False,
+                 compute_area=True,
+                 compute_dispersion=True,
+                 compute_distance=True,
+                 compute_foci=do_clustering and len(clusters) > 0,
+                 compute_intranuclear=True,
+                 compute_protrusion=False,
+                 compute_topography=True
+             )
+
+         features = list(features)
+         features += [np.mean(nuc_signal), np.median(nuc_signal), np.max(nuc_signal), np.min(nuc_signal)]
+
+         features = [acquisition_id, cell_id, cell_bbox] + features
+
+         result_frame = pd.concat([
+             result_frame,
+             pd.DataFrame(columns=features_names, data=(features,)),
+         ],
+             axis=0
+         )
+
+     return result_frame
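+
+ # The frame built above holds one row per extracted cell : ['acquisition_id',
+ # 'cell_id', 'cell_bbox'], then the bigfish feature names, then the four
+ # nucleus signal statistics appended last.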
+
+
+
+ @add_default_loading
+ def launch_clustering(spots, user_parameters):
+
+     voxel_size = user_parameters['voxel_size']
+     nb_min_spots = user_parameters['min number of spots']
+     cluster_size = user_parameters['cluster size']
+
+     clusters = cluster_detection(
+         spots=spots,
+         voxel_size=voxel_size,
+         radius=cluster_size,
+         nb_min_spots=nb_min_spots,
+         keys_to_compute='clusters'
+     )['clusters']
+
+     return clusters
+
+ def launch_detection(
+         image,
+         other_image,
+         user_parameters,
+         cell_label=None,
+         nucleus_label=None
+         ) :
+     """
+     Main call for features computation :
+     --> spot detection
+     --> dense regions deconvolution
+     --> cluster (opt)
+     --> spot correction
+     --> general features computations
+     --> cell extraction
+     --> cell features computations
+
+     RETURNS
+     -------
+     user_parameters : dict
+     fov_result : dict
+     spots : np.ndarray
+     clusters : np.ndarray
+
+     USER_PARAMETERS UPDATE
+     ----------------------
+     'threshold'
+     """
+     fov_result = {}
+     do_dense_region_deconvolution = user_parameters['Dense regions deconvolution']
+     do_clustering = user_parameters['Cluster computation']
+
+     spots, threshold = _launch_detection(image, user_parameters)
+
+     if do_dense_region_deconvolution :
+         spots = launch_dense_region_deconvolution(image, spots, user_parameters)
+
+     if do_clustering :
+         clusters = launch_clustering(spots, user_parameters) # columns 0-2 : coordinates ; column 3 : number of spots per cluster ; column 4 : cluster index
+         clusters = _update_clusters(clusters, spots, voxel_size=user_parameters['voxel_size'], cluster_size=user_parameters['cluster size'], min_spot_number=user_parameters['min number of spots'], shape=image.shape)
+
+     else : clusters = None
+
+     spots, post_detection_dict = launch_post_detection(image, spots, user_parameters)
+     user_parameters['threshold'] = threshold
+
+     if user_parameters['Napari correction'] :
+
+         spots, clusters = correct_spots(
+             image,
+             spots,
+             user_parameters['voxel_size'],
+             clusters=clusters,
+             cluster_size=user_parameters.get('cluster size'),
+             min_spot_number=user_parameters.setdefault('min number of spots', 0),
+             cell_label=cell_label,
+             nucleus_label=nucleus_label,
+             other_images=other_image
+         )
+
+     fov_result.update(post_detection_dict)
+
+     return user_parameters, fov_result, spots, clusters
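+
+ # Hedged usage sketch of the pipeline above (names are illustrative ; the image
+ # and parameter dict would come from ask_input_parameters / initiate_detection) :
+ #     user_parameters, fov_result, spots, clusters = launch_detection(
+ #         image, other_image=[], user_parameters=user_parameters)
+ #     print(fov_result['spot_number'], user_parameters['threshold'])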
+
+
+ def launch_features_computation(acquisition_id, image, nucleus_signal, spots, clusters, nucleus_label, cell_label, user_parameters, frame_results) :
+
+     dim = image.ndim
+
+     if user_parameters['Cluster computation'] :
+         frame_results['cluster_number'] = len(clusters)
+         if dim == 3 :
+             frame_results['total_spots_in_clusters'] = clusters.sum(axis=0)[3]
+         else :
+             frame_results['total_spots_in_clusters'] = clusters.sum(axis=0)[2]
+
+     if cell_label is not None and nucleus_label is not None :
+         cell_result_dframe = launch_cell_extraction(
+             acquisition_id=acquisition_id,
+             spots=spots,
+             clusters=clusters,
+             image=image,
+             nucleus_signal=nucleus_signal,
+             cell_label=cell_label,
+             nucleus_label=nucleus_label,
+             user_parameters=user_parameters,
+         )
+     else :
+         cell_result_dframe = pd.DataFrame()
+
+     frame_results['acquisition_id'] = acquisition_id
+     if cell_label is not None and nucleus_label is not None :
+         frame_results['cell_number'] = len(cell_result_dframe)
+     else :
+         frame_results['cell_number'] = nan
+     frame_results['spots'] = spots
+     frame_results['clusters'] = clusters
+     frame_results.update(user_parameters)
+     frame_results['threshold'] = user_parameters['threshold']
+
+     frame_results = pd.DataFrame(columns=list(frame_results.keys()), data=(list(frame_results.values()),))
+
+     return frame_results, cell_result_dframe
+
+ def _compute_clustered_spots_dataframe(clustered_spots) :
+     if len(clustered_spots) == 0 : return pd.DataFrame(columns= ["id", "cluster_id", "z", "y", "x"])
+     if len(clustered_spots[0]) == 4 : # 3D spots : (z, y, x, cluster_index)
+         z, y, x, cluster_index = list(zip(*clustered_spots))
+     else : # 2D spots : (y, x, cluster_index)
+         y, x, cluster_index = list(zip(*clustered_spots))
+         z = np.full(len(clustered_spots), np.nan)
+     ids = np.arange(len(clustered_spots))
+
+     df = pd.DataFrame({
+         "id" : ids
+         ,"cluster_id" : cluster_index
+         ,"z" : z
+         ,"y" : y
+         ,"x" : x
+     })
+
+     null_idx = df[df['cluster_id'] == -1].index
+     df.loc[null_idx, 'cluster_id'] = np.nan
+
+     return df
+
+ def _compute_cluster_dataframe(clusters) :
+     if len(clusters) == 0 : return pd.DataFrame(columns= ["id", "z", "y", "x", "spot_number"])
+     if len(clusters[0]) == 5 : # 3D clusters : (z, y, x, spot_number, cluster_index)
+         z, y, x, spots_number, cluster_index = list(zip(*clusters))
+     else : # 2D clusters : (y, x, spot_number, cluster_index)
+         y, x, spots_number, cluster_index = list(zip(*clusters))
+         z = np.full(len(clusters), np.nan)
+
+     df = pd.DataFrame({
+         "id" : cluster_index
+         ,"z" : z
+         ,"y" : y
+         ,"x" : x
+         ,"spot_number" : spots_number
+     })
+
+     return df
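+
+ # Example (hedged) : a single 3D cluster (z, y, x, nb_spots, id) becomes one row
+ # with columns id, z, y, x, spot_number :
+ #     _compute_cluster_dataframe(np.array([[2, 10, 12, 5, 0]]))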
+
+ def get_nucleus_signal(image, other_images, user_parameters) :
+     if user_parameters['multichannel'] :
+         rna_signal_channel = user_parameters['channel to compute']
+         nucleus_signal_channel = user_parameters['nucleus channel signal']
+         if nucleus_signal_channel is None :
+             return np.zeros(shape=image.shape)
+
+         if rna_signal_channel == nucleus_signal_channel :
+             nucleus_signal = image
+
+         elif nucleus_signal_channel > rna_signal_channel :
+             nucleus_signal_channel -= 1 # other_images excludes the rna channel : higher channels are shifted down by one
+             nucleus_signal = other_images[nucleus_signal_channel]
+
+         elif nucleus_signal_channel < rna_signal_channel :
+             nucleus_signal = other_images[nucleus_signal_channel]
+
+         return nucleus_signal
+     else :
+         return image
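+
+ # Channel indexing sketch (hedged) : with channels [0, 1, 2], rna channel 1 and
+ # nucleus channel 2, `other_images` holds channels [0, 2] ; index 2 is therefore
+ # shifted down to 1 before the lookup above.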