small-fish-gui 1.8.0-py3-none-any.whl → 1.9.0-py3-none-any.whl

@@ -226,10 +226,10 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
  fraction_spots1_coloc_spots2 = np.NaN
  fraction_spots2_coloc_spots1 = np.NaN

- if 'clusters' in acquisition1.index :
+ if 'clusters' in acquisition1.columns :
  try :
- clusters1 = acquisition1.iloc[0].at['clusters'][:,:len(voxel_size)]
- fraction_spots2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2, spot_list2=clusters1, distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
+ clusters_id_1 = acquisition1.iloc[0].at['spots_cluster_id']
+ fraction_spots2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2, spot_list2=spots1[clusters_id_1 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / spot2_total
  except MissMatchError as e :
  sg.popup(str(e))
  fraction_spots2_coloc_cluster1 = np.NaN
@@ -239,10 +239,10 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat

  else : fraction_spots2_coloc_cluster1 = np.NaN

- if 'clusters' in acquisition2.index :
+ if 'clusters' in acquisition2.columns :
  try :
- clusters2 = acquisition2.iloc[0].at['clusters'][:,:len(voxel_size)]
- fraction_spots1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1, spot_list2=clusters2, distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
+ clusters_id_2 = acquisition2.iloc[0].at['spots_cluster_id']
+ fraction_spots1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1, spot_list2=spots2[clusters_id_2 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / spot1_total
  except MissMatchError as e :# clusters not computed
  sg.popup(str(e))
  fraction_spots1_coloc_cluster2 = np.NaN
@@ -252,6 +252,25 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat

  else : fraction_spots1_coloc_cluster2 = np.NaN

+ if 'clusters' in acquisition2.columns and 'clusters' in acquisition1.columns :
+ try :
+ total_clustered_spots1 = len(spots1[clusters_id_1 != -1])
+ total_clustered_spots2 = len(spots2[clusters_id_2 != -1])
+ fraction_cluster1_coloc_cluster2 = spots_colocalisation(spot_list1=spots1[clusters_id_1 != -1], spot_list2=spots2[clusters_id_2 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / total_clustered_spots1
+ fraction_cluster2_coloc_cluster1 = spots_colocalisation(spot_list1=spots2[clusters_id_2 != -1], spot_list2=spots1[clusters_id_1 != -1], distance= colocalisation_distance, voxel_size=voxel_size) / total_clustered_spots2
+ except MissMatchError as e :# clusters not computed
+ sg.popup(str(e))
+ fraction_cluster1_coloc_cluster2 = np.NaN
+ fraction_cluster2_coloc_cluster1 = np.NaN
+ except TypeError :
+ fraction_cluster1_coloc_cluster2 = np.NaN
+ fraction_cluster2_coloc_cluster1 = np.NaN
+
+ else :
+ fraction_cluster1_coloc_cluster2 = np.NaN
+ fraction_cluster2_coloc_cluster1 = np.NaN
+
+
  coloc_df = pd.DataFrame({
  "acquisition_couple" : [acquisition_couple],
  "acquisition_id_1" : [acquisition_couple[0]],
@@ -263,6 +282,8 @@ def _global_coloc(acquisition_id1,acquisition_id2, result_dataframe, colocalisat
  'fraction_spots2_coloc_spots1' : [fraction_spots2_coloc_spots1],
  'fraction_spots2_coloc_cluster1' : [fraction_spots2_coloc_cluster1],
  'fraction_spots1_coloc_cluster2' : [fraction_spots1_coloc_cluster2],
+ 'fraction_cluster1_coloc_cluster2' : [fraction_cluster1_coloc_cluster2],
+ 'fraction_cluster2_coloc_cluster1' : [fraction_cluster2_coloc_cluster1],
  })

  coloc_df['fraction_spots1_coloc_free2'] = coloc_df['fraction_spots1_coloc_spots2'] - coloc_df['fraction_spots1_coloc_cluster2']
@@ -289,10 +310,9 @@ def _cell_coloc(

  acquisition_name_id1 = acquisition1['name'].iat[0]
  acquisition_name_id2 = acquisition2['name'].iat[0]
- cluster_radius1 = acquisition1['cluster size'].iat[0]
- cluster_radius2 = acquisition2['cluster size'].iat[0]
  result_dataframe = result_dataframe.set_index('acquisition_id', drop=False)
- coloc_name = '{0}nm_{1}{2}_{3}{4}'.format(colocalisation_distance, acquisition_id1,acquisition_name_id1, acquisition_id2,acquisition_name_id2)
+ coloc_name_forward = '{0} -> {1}'.format(acquisition_name_id1, acquisition_name_id2)
+ coloc_name_backward = '{1} -> {0}'.format(acquisition_name_id1, acquisition_name_id2)

  #Getting shape
  if not result_dataframe.at[acquisition_id1, 'reordered_shape'] == result_dataframe.at[acquisition_id2, 'reordered_shape'] :
@@ -309,14 +329,15 @@ def _cell_coloc(
  #Putting spots lists in 2 cols for corresponding cells
  pivot_values_columns = ['rna_coords', 'total_rna_number']
  if 'clusters' in acquisition2.columns or 'clusters' in acquisition1.columns :
- pivot_values_columns.extend(['cluster_coords','foci_number'])
+ pivot_values_columns.extend(['clustered_spots_coords','clustered_spot_number'])
+ cell_dataframe['cell_id'] = cell_dataframe['cell_id'].astype(int)
  colocalisation_df = cell_dataframe.pivot(
  columns=['name', 'acquisition_id'],
  values= pivot_values_columns,
  index= 'cell_id'
  )
  #spots _vs spots
- colocalisation_df[("spots_to_spots_count",coloc_name,"forward")] = colocalisation_df['rna_coords'].apply(
+ colocalisation_df[("spots_with_spots_count",coloc_name_forward,"forward")] = colocalisation_df['rna_coords'].apply(
  lambda x: spots_colocalisation(
  spot_list1= x[(acquisition_name_id1,acquisition_id1)],
  spot_list2= x[(acquisition_name_id2,acquisition_id2)],
@@ -324,9 +345,9 @@ def _cell_coloc(
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("spots_to_spots_fraction",coloc_name,"forward")] = colocalisation_df[("spots_to_spots_count",coloc_name,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)
+ colocalisation_df[("spots_with_spots_fraction",coloc_name_forward,"forward")] = colocalisation_df[("spots_with_spots_count",coloc_name_forward,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)

- colocalisation_df[("spots_to_spots_count",coloc_name,"backward")] = colocalisation_df['rna_coords'].apply(
+ colocalisation_df[("spots_with_spots_count",coloc_name_backward,"backward")] = colocalisation_df['rna_coords'].apply(
  lambda x: spots_colocalisation(
  spot_list1= x[(acquisition_name_id2,acquisition_id2)],
  spot_list2= x[(acquisition_name_id1,acquisition_id1)],
@@ -334,66 +355,71 @@ def _cell_coloc(
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("spots_to_spots_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_spots_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)
+ colocalisation_df[("spots_with_spots_fraction",coloc_name_backward,"backward")] = colocalisation_df[("spots_with_spots_count",coloc_name_backward,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)

  if acquisition2['do_cluster_computation'].iat[0] :
  if len(acquisition2['clusters'].iat[0]) > 0 :

  #spots to clusters
- colocalisation_df[("spots_to_clusters_count",coloc_name,"forward")] = colocalisation_df.apply(
+ colocalisation_df[("spots_with_clustered_spots_count",coloc_name_forward,"forward")] = colocalisation_df.apply(
  lambda x: spots_colocalisation(
  spot_list1= x[('rna_coords',acquisition_name_id1,acquisition_id1)],
- spot_list2= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
- distance=colocalisation_distance + cluster_radius2,
+ spot_list2= x[('clustered_spots_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+ distance=colocalisation_distance,
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("spots_to_clusters_fraction",coloc_name,"forward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)
+ colocalisation_df[("spots_with_clustered_spots_fraction",coloc_name_forward,"forward")] = colocalisation_df[("spots_with_clustered_spots_count",coloc_name_forward,"forward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id1,acquisition_id1)].astype(float)

  if acquisition1['do_cluster_computation'].iat[0] :
  if len(acquisition1['clusters'].iat[0]) > 0 :
- colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")] = colocalisation_df.apply(
+ colocalisation_df[("spots_with_clustered_spots_count",coloc_name_backward,"backward")] = colocalisation_df.apply(
  lambda x: spots_colocalisation(
  spot_list1= x[('rna_coords',acquisition_name_id2,acquisition_id2)],
- spot_list2= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
- distance=colocalisation_distance + cluster_radius1,
+ spot_list2= x[('clustered_spots_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+ distance=colocalisation_distance,
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("spots_to_clusters_fraction",coloc_name,"backward")] = colocalisation_df[("spots_to_clusters_count",coloc_name,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)

- if acquisition2['do_cluster_computation'].iat[0] and acquisition1['do_cluster_computation'].iat[0] :
+ colocalisation_df[("spots_with_clustered_spots_fraction",coloc_name_backward,"backward")] = colocalisation_df[("spots_with_clustered_spots_count",coloc_name_backward,"backward")].astype(float) / colocalisation_df[('total_rna_number',acquisition_name_id2,acquisition_id2)].astype(float)
+
+ if acquisition2['do_cluster_computation'].iat[0] and acquisition1['do_cluster_computation'].iat[0] :
  if len(acquisition1['clusters'].iat[0]) > 0 and len(acquisition2['clusters'].iat[0]) > 0 :
  #clusters to clusters
- colocalisation_df[("clusters_to_clusters_count",coloc_name,"forward")] = colocalisation_df.apply(
+ colocalisation_df[("clustered_spots_with_clustered_spots_count",coloc_name_forward,"forward")] = colocalisation_df.apply(
  lambda x: spots_colocalisation(
- spot_list1= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
- spot_list2= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
- distance=colocalisation_distance + cluster_radius1 + cluster_radius2,
+ spot_list1= x[('clustered_spots_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+ spot_list2= x[('clustered_spots_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+ distance=colocalisation_distance,
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("clusters_to_clusters_fraction",coloc_name,"forward")] = colocalisation_df[("clusters_to_clusters_count",coloc_name,"forward")].astype(float) / colocalisation_df[('foci_number',acquisition_name_id1,acquisition_id1)].astype(float)
+ colocalisation_df[("clustered_spots_with_clustered_spots_fraction",coloc_name_forward,"forward")] = colocalisation_df[("clustered_spots_with_clustered_spots_count",coloc_name_forward,"forward")].astype(float) / colocalisation_df[('clustered_spot_number',acquisition_name_id1,acquisition_id1)].astype(float)

- colocalisation_df[("clusters_to_clusters_count",coloc_name,"backward")] = colocalisation_df.apply(
+ colocalisation_df[("clustered_spots_with_clustered_spots_count",coloc_name_backward,"backward")] = colocalisation_df.apply(
  lambda x: spots_colocalisation(
- spot_list1= x[('cluster_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
- spot_list2= x[('cluster_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
- distance=colocalisation_distance + cluster_radius1 + cluster_radius2,
+ spot_list1= x[('clustered_spots_coords',acquisition_name_id2,acquisition_id2)][:,:len(voxel_size)],
+ spot_list2= x[('clustered_spots_coords',acquisition_name_id1,acquisition_id1)][:,:len(voxel_size)],
+ distance=colocalisation_distance,
  voxel_size=voxel_size
  ),axis=1
  )
- colocalisation_df[("clusters_to_clusters_fraction",coloc_name,"backward")] = colocalisation_df[("clusters_to_clusters_count",coloc_name,"backward")].astype(float) / colocalisation_df[('foci_number',acquisition_name_id2,acquisition_id2)].astype(float)
+ colocalisation_df[("clustered_spots_with_clustered_spots_fraction",coloc_name_backward,"backward")] = colocalisation_df[("clustered_spots_with_clustered_spots_count",coloc_name_backward,"backward")].astype(float) / colocalisation_df[('clustered_spot_number',acquisition_name_id2,acquisition_id2)].astype(float)

  colocalisation_df = colocalisation_df.sort_index(axis=0).sort_index(axis=1, level=0)

- if 'cluster_coords' in cell_dataframe.columns : colocalisation_df = colocalisation_df.drop('cluster_coords', axis=1)
+ if 'clustered_spots_coords' in cell_dataframe.columns : colocalisation_df = colocalisation_df.drop('clustered_spots_coords', axis=1)
  colocalisation_df = colocalisation_df.drop('rna_coords', axis=1)
+ colocalisation_df['voxel_size'] = [voxel_size]*len(colocalisation_df)
+ colocalisation_df['pair_name'] = [(acquisition_name_id1, acquisition_name_id2)] * len(colocalisation_df)
+ colocalisation_df['pair_acquisition_id'] = [(acquisition_id1, acquisition_id2)] * len(colocalisation_df)
+ colocalisation_df['colocalisation_distance'] = colocalisation_distance

  return colocalisation_df

  @add_default_loading
- def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe, colocalisation_distance, global_coloc_df, cell_coloc_df: pd.DataFrame) :
+ def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe, colocalisation_distance, global_coloc_df, cell_coloc_df: dict) :

  acquisition1 = result_dataframe.iloc[result_tables[0]]
  acquisition2 = result_dataframe.iloc[result_tables[1]]
@@ -409,12 +435,11 @@ def launch_colocalisation(result_tables, result_dataframe, cell_result_dataframe
  cell_dataframe=cell_result_dataframe,
  colocalisation_distance=colocalisation_distance
  )
- cell_coloc_df = pd.concat([
- cell_coloc_df,
- new_coloc,
- ], axis=1).sort_index(axis=1, level=0)
-
- cell_coloc_df.index = cell_coloc_df.index.rename('cell_id')
+
+ index = 0
+ while (acquisition_id1, acquisition_id2, index) in cell_coloc_df.keys() :
+ index +=1
+ cell_coloc_df [(acquisition_id1,acquisition_id2, index)] = new_coloc


  else :
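
For the launch_colocalisation change above: per-cell colocalisation results are no longer concatenated into one wide DataFrame but kept in a plain dict keyed by (acquisition_id_1, acquisition_id_2, index), where the running index keeps repeated runs of the same acquisition pair from overwriting each other. A small sketch of that keying scheme, using toy DataFrames in place of the real per-pair results (store_pair_result is an illustrative helper, not a package function):

    import pandas as pd

    cell_coloc_df = {}   # dict replacing the former single wide DataFrame

    def store_pair_result(cell_coloc_df, acquisition_id1, acquisition_id2, new_coloc):
        # Find the first unused index for this acquisition pair, then store the result under it.
        index = 0
        while (acquisition_id1, acquisition_id2, index) in cell_coloc_df:
            index += 1
        cell_coloc_df[(acquisition_id1, acquisition_id2, index)] = new_coloc

    store_pair_result(cell_coloc_df, 0, 1, pd.DataFrame({'fraction': [0.4]}))
    store_pair_result(cell_coloc_df, 0, 1, pd.DataFrame({'fraction': [0.6]}))  # same pair, next index
    print(list(cell_coloc_df.keys()))   # [(0, 1, 0), (0, 1, 1)]
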
@@ -6,7 +6,7 @@ from ..gui.prompts import output_image_prompt, prompt_save_segmentation, prompt_
  from ..gui.prompts import ask_detection_confirmation, ask_cancel_detection, ask_confirmation
  from ..gui.prompts import rename_prompt

- from ..interface.inoutput import write_results
+ from ..interface.inoutput import write_results, write_list_of_results
  from ..interface.inoutput import input_segmentation, output_segmentation

  from ._preprocess import map_channels
@@ -49,9 +49,6 @@ def segment_cells(user_parameters : pipeline_parameters, nucleus_label, cytoplas
  return nucleus_label, cytoplasm_label, user_parameters

  def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytoplasm_label, nucleus_label) :
- """
- #TODO : Separate segmentation from detection in pipeline.
- """

  new_results_df = pd.DataFrame()
  new_cell_results_df = pd.DataFrame()
@@ -89,7 +86,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
  nucleus_signal = get_nucleus_signal(image, other_image, user_parameters)

  try : # Catch error raised if user enter a spot size too small compare to voxel size
- user_parameters, frame_result, spots, clusters = launch_detection(
+ user_parameters, frame_result, spots, clusters, spots_cluster_id = launch_detection(
  image,
  other_image,
  user_parameters,
@@ -105,7 +102,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
  raise(error)


- if user_parameters['show_napari_corrector'] :
+ if user_parameters['show_napari_corrector'] or user_parameters['show_interactive_threshold_selector']:
  if ask_detection_confirmation(user_parameters.get('threshold')) : break
  else :
  break
@@ -118,6 +115,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
  user_parameters=user_parameters,
  image=image,
  spots=spots,
+ cluster_id= spots_cluster_id,
  nucleus_label= nucleus_label,
  cell_label= cytoplasm_label,
  )
@@ -129,6 +127,7 @@ def add_detection(user_parameters : pipeline_parameters, acquisition_id, cytopla
  nucleus_signal = nucleus_signal,
  spots=spots,
  clusters=clusters,
+ spots_cluster_id = spots_cluster_id,
  nucleus_label = nucleus_label,
  cell_label= cytoplasm_label,
  user_parameters=user_parameters,
@@ -222,7 +221,12 @@ def load_segmentation(nucleus_label, cytoplasm_label, segmentation_done) :

  return nucleus_label, cytoplasm_label, segmentation_done

- def save_results(result_df, cell_result_df, global_coloc_df, cell_coloc_df) :
+ def save_results(
+ result_df : pd.DataFrame,
+ cell_result_df : pd.DataFrame,
+ global_coloc_df : pd.DataFrame,
+ cell_coloc_df : dict, #TODO : Rename to cell_coloc_dict
+ ) :
  if len(result_df) != 0 :
  dic = output_image_prompt(filename=result_df.iloc[0].at['filename'])

@@ -238,7 +242,7 @@ def save_results(result_df, cell_result_df, global_coloc_df, cell_coloc_df) :
  sucess1 = write_results(result_df, path= path, filename=filename, do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
  sucess2 = write_results(cell_result_df, path= path, filename=filename + '_cell_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
  sucess3 = write_results(global_coloc_df, path= path, filename=filename + 'global_coloc_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
- sucess4 = write_results(cell_coloc_df, path= path, filename=filename + 'cell2cell_coloc_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv, reset_index=False)
+ sucess4 = write_list_of_results(cell_coloc_df.values(), path= path, filename=filename + 'cell2cell_coloc_result', do_excel= do_excel, do_feather= do_feather, do_csv=do_csv)
  if all([sucess1,sucess2, sucess3, sucess4,]) : sg.popup("Sucessfully saved at {0}.".format(path))

  else :
@@ -266,7 +270,7 @@ def delete_acquisitions(selected_acquisitions : pd.DataFrame,
  result_df : pd.DataFrame,
  cell_result_df : pd.DataFrame,
  global_coloc_df : pd.DataFrame,
- cell_coloc_df : pd.DataFrame,
+ cell_coloc_df : dict,
  ) :

  if len(result_df) == 0 :
@@ -291,11 +295,14 @@ def delete_acquisitions(selected_acquisitions : pd.DataFrame,
  global_coloc_df = global_coloc_df.drop(coloc_df_drop_idx, axis=0)

  if len(cell_coloc_df) > 0 :
+ keys_to_delete = []
  for acquisition_id in acquisition_ids :
- cell_coloc_df = cell_coloc_df.drop(acquisition_id, axis=1, level=2) #Delete spot number and foci number
- coloc_columns = cell_coloc_df.columns.get_level_values(1)
- coloc_columns = coloc_columns[coloc_columns.str.contains(str(acquisition_id))]
- cell_coloc_df = cell_coloc_df.drop(labels=coloc_columns, axis=1, level=1)
+ for coloc_key in cell_coloc_df.keys() :
+ if acquisition_id in coloc_key :
+ keys_to_delete.append(coloc_key)
+
+ for key in keys_to_delete :
+ if key in cell_coloc_df.keys() : cell_coloc_df.pop(key)

  result_df = result_df.drop(result_drop_idx, axis=0)

@@ -306,7 +313,7 @@ def rename_acquisitions(
  result_df : pd.DataFrame,
  cell_result_df : pd.DataFrame,
  global_coloc_df : pd.DataFrame,
- cell_coloc_df : pd.DataFrame,
+ cell_coloc_df : dict,
  ) :

  if len(result_df) == 0 :
@@ -331,16 +338,19 @@ def rename_acquisitions(
  global_coloc_df.loc[global_coloc_df['acquisition_id_1'].isin(acquisition_ids), ['name1']] = name
  global_coloc_df.loc[global_coloc_df['acquisition_id_2'].isin(acquisition_ids), ['name2']] = name
  if len(cell_coloc_df) > 0 :
- target_columns = cell_coloc_df.columns.get_level_values(1)
- for old_name in old_names : #Note list was ordered by elmt len (decs) to avoid conflict when one name is contained by another one. if the shorter is processed first then the longer will not be able to be properly renamed.
- target_columns = target_columns.str.replace(old_name, name)
-
- new_columns = zip(
- cell_coloc_df.columns.get_level_values(0),
- target_columns,
- cell_coloc_df.columns.get_level_values(2),
- )
+ for key in cell_coloc_df.keys() :
+ df = cell_coloc_df[key]
+ target_columns = df.columns.get_level_values(1)
+ for old_name in old_names : #Note list was ordered by elmt len (decs) to avoid conflict when one name is contained by another one. if the shorter is processed first then the longer will not be able to be properly renamed.
+ target_columns = target_columns.str.replace(old_name, name)
+
+ new_columns = zip(
+ df.columns.get_level_values(0),
+ target_columns,
+ df.columns.get_level_values(2),
+ )

- cell_coloc_df.columns = pd.MultiIndex.from_tuples(new_columns)
+ df.columns = pd.MultiIndex.from_tuples(new_columns)
+ cell_coloc_df[key] = df

  return result_df, cell_result_df, global_coloc_df, cell_coloc_df
@@ -7,7 +7,7 @@ from ..hints import pipeline_parameters
  from ._preprocess import ParameterInputError
  from ._preprocess import check_integrity, convert_parameters_types

- from ..gui.napari import correct_spots, _update_clusters, threshold_selection
+ from ..gui.napari_visualiser import correct_spots, threshold_selection
  from ..gui import add_default_loading
  from ..gui import detection_parameters_promt

@@ -388,14 +388,36 @@ def _compute_cell_snr(image: np.ndarray, bbox, spots, voxel_size, spot_size) :
  return snr_dict

  @add_default_loading
- def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signal, cell_label, nucleus_label, user_parameters : pipeline_parameters) :
+ def launch_cell_extraction(
+ acquisition_id,
+ spots,
+ clusters,
+ spots_cluster_id,
+ image,
+ nucleus_signal,
+ cell_label,
+ nucleus_label,
+ user_parameters : pipeline_parameters
+ ) :

  #Extract parameters
  dim = user_parameters['dim']
  do_clustering = user_parameters['do_cluster_computation']
  voxel_size = user_parameters['voxel_size']

- if do_clustering : other_coords = {'clusters_coords' : clusters} if len(clusters) > 0 else None
+ if do_clustering :
+ if len(clusters) > 0 :
+
+ free_spots = spots[spots_cluster_id == -1]
+ clustered_spots = spots[spots_cluster_id != -1]
+
+ other_coords = {
+ 'clusters_coords' : clusters,
+ 'clustered_spots' : clustered_spots,
+ 'free_spots' : free_spots,
+ }
+ else :
+ other_coords = None
  else : other_coords = None
  if do_clustering : do_clustering = len(clusters) > 0

@@ -429,7 +451,8 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa
  features_names += ['nucleus_mean_signal', 'nucleus_median_signal', 'nucleus_max_signal', 'nucleus_min_signal']
  features_names += ['snr_mean', 'snr_median', 'snr_std']
  features_names += ['cell_center_coord','foci_number','foci_in_nuc_number']
- features_names += ['rna_coords','cluster_coords']
+ features_names += ['rna_coords','cluster_coords', 'clustered_spots_coords', 'free_spots_coords']
+ features_names += ['clustered_spot_number', 'free_spot_number']

  result_frame = pd.DataFrame()

@@ -444,6 +467,8 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa
  nuc_signal = nucleus_signal[min_y:max_y, min_x:max_x]
  rna_coords = cell['rna_coord']
  foci_coords = cell.get('clusters_coords')
+ clustered_spots_coords = cell.get('clustered_spots')
+ free_spots_coords = cell.get('free_spots')
  signal = cell['image']

  with np.errstate(divide= 'ignore', invalid= 'ignore') :
@@ -509,7 +534,8 @@ def launch_cell_extraction(acquisition_id, spots, clusters, image, nucleus_signa
  features += [cell_center, foci_number, foci_in_nuc_number]

  features = [acquisition_id, cell_id, cell_bbox] + features
- features += [rna_coords, foci_coords]
+ features += [rna_coords, foci_coords, clustered_spots_coords, free_spots_coords]
+ features += [len(clustered_spots_coords), len(free_spots_coords)]

  result_frame = pd.concat([
  result_frame,
@@ -527,20 +553,23 @@ def launch_clustering(spots, user_parameters : pipeline_parameters):
  nb_min_spots = user_parameters['min number of spots']
  cluster_size = user_parameters['cluster size']

- clusters = cluster_detection(
+ cluster_result_dict = cluster_detection(
  spots=spots,
  voxel_size=voxel_size,
  radius=cluster_size,
  nb_min_spots=nb_min_spots,
- keys_to_compute= 'clusters'
- )['clusters']
+ keys_to_compute= ['clusters','clustered_spots']
+ )
+
+ clusters = cluster_result_dict['clusters']
+ clustered_spots = cluster_result_dict['clustered_spots']

- return clusters
+ return clusters, clustered_spots

  def launch_detection(
  image,
  other_image,
- user_parameters,
+ user_parameters : pipeline_parameters,
  cell_label= None,
  nucleus_label = None,
  hide_loading=False,
@@ -575,10 +604,12 @@ def launch_detection(
  spots = launch_dense_region_deconvolution(image, spots, user_parameters, hide_loading = hide_loading)

  if do_clustering :
- clusters = launch_clustering(spots, user_parameters, hide_loading = hide_loading) #012 are coordinates #3 is number of spots per cluster, #4 is cluster index
- clusters = _update_clusters(clusters, spots, voxel_size=user_parameters['voxel_size'], cluster_size=user_parameters['cluster size'], shape=image.shape)
+ clusters, clustered_spots = launch_clustering(spots, user_parameters, hide_loading = hide_loading) #012 are coordinates #3 is number of spots per cluster, #4 is cluster index
+ spots, spots_cluster_id = clustered_spots[:,:-1], clustered_spots[:,-1]

- else : clusters = None
+ else :
+ clusters = None
+ spots_cluster_id = None

  user_parameters['threshold'] = threshold

@@ -589,19 +620,37 @@ def launch_detection(
  spots,
  user_parameters['voxel_size'],
  clusters=clusters,
+ spot_cluster_id = spots_cluster_id,
  cluster_size= user_parameters.get('cluster size'),
  min_spot_number= user_parameters.setdefault('min number of spots', 0),
  cell_label=cell_label,
  nucleus_label=nucleus_label,
  other_images=other_image
  )
+
+ if do_clustering :
+ spots, spots_cluster_id = spots[:,:-1], spots[:,-1]
+ else :
+ spots_cluster_id = None
+
  post_detection_dict = launch_post_detection(image, spots, user_parameters, hide_loading = hide_loading)
  fov_result.update(post_detection_dict)

- return user_parameters, fov_result, spots, clusters
+ return user_parameters, fov_result, spots, clusters, spots_cluster_id


- def launch_features_computation(acquisition_id, image, nucleus_signal, spots, clusters, nucleus_label, cell_label, user_parameters :pipeline_parameters, frame_results) :
+ def launch_features_computation(
+ acquisition_id,
+ image,
+ nucleus_signal,
+ spots,
+ clusters,
+ spots_cluster_id,
+ nucleus_label,
+ cell_label,
+ user_parameters : pipeline_parameters,
+ frame_results
+ ) :

  dim = image.ndim
  if user_parameters['do_cluster_computation'] :
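
For the detection changes above: launch_clustering now also returns the clustered-spots array whose last column holds the cluster index assigned to each spot (-1 meaning the spot belongs to no cluster), and launch_detection splits that array back into coordinates and spots_cluster_id. A short sketch of that column convention on toy data, assuming the same (z, y, x, cluster_id) layout as in the diff:

    import numpy as np

    # Toy clustered-spots array: columns are (z, y, x, cluster_id); -1 marks a free spot.
    clustered_spots = np.array([
        [0, 10, 10,  0],
        [0, 12, 11,  0],
        [0, 50, 50, -1],
        [0, 90, 90,  1],
    ])

    spots, spots_cluster_id = clustered_spots[:, :-1], clustered_spots[:, -1]
    free_spots = spots[spots_cluster_id == -1]        # spots assigned to no cluster
    clustered_only = spots[spots_cluster_id != -1]    # spots that fall inside a cluster
    print(len(free_spots), len(clustered_only))       # prints: 1 3
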
@@ -618,6 +667,7 @@ def launch_features_computation(acquisition_id, image, nucleus_signal, spots, cl
  acquisition_id=acquisition_id,
  spots=spots,
  clusters=clusters,
+ spots_cluster_id = spots_cluster_id,
  image=image,
  nucleus_signal=nucleus_signal,
  cell_label= cell_label,
@@ -638,6 +688,7 @@ def launch_features_computation(acquisition_id, image, nucleus_signal, spots, cl
  frame_results['cell_number'] = NaN
  frame_results['spots'] = spots
  frame_results['clusters'] = clusters
+ frame_results['spots_cluster_id'] = spots_cluster_id
  frame_results.update(user_parameters)
  frame_results['threshold'] = user_parameters['threshold']

@@ -16,13 +16,13 @@ acquisition_id = -1
  result_df = pd.DataFrame(columns=['acquisition_id'])
  cell_result_df = pd.DataFrame(columns=['acquisition_id'])
  global_coloc_df = pd.DataFrame()
- cell_coloc_df = pd.DataFrame()
+ cell_coloc_df = dict()
  cytoplasm_label = None
  nucleus_label = None

  while True : #Break this loop to close small_fish
-
  try :
+ result_df = result_df.reset_index(drop=True)
  event, values = hub_prompt(result_df, user_parameters['segmentation_done'])

  if event == 'Add detection' :
@@ -34,8 +34,8 @@ while True : #Break this loop to close small_fish
  cytoplasm_label = cytoplasm_label,
  nucleus_label = nucleus_label,
  )
- result_df = pd.concat([result_df, new_result_df], axis=0)
- cell_result_df = pd.concat([cell_result_df, new_cell_result_df], axis=0)
+ result_df = pd.concat([result_df, new_result_df], axis=0).reset_index(drop=True)
+ cell_result_df = pd.concat([cell_result_df, new_cell_result_df], axis=0).reset_index(drop=True)

  elif event == 'Segment cells' :
  nucleus_label, cytoplasm_label, user_parameters = segment_cells(
@@ -73,10 +73,10 @@ while True : #Break this loop to close small_fish
  )

  elif event == "Reset all" :
- result_df = pd.DataFrame()
- cell_result_df = pd.DataFrame()
+ result_df = pd.DataFrame(columns=['acquisition_id'])
+ cell_result_df = pd.DataFrame(columns=['acquisition_id'])
  global_coloc_df = pd.DataFrame()
- cell_coloc_df = pd.DataFrame()
+ cell_coloc_df = dict()
  acquisition_id = -1
  user_parameters['segmentation_done'] = False
  cytoplasm_label = None
@@ -8,7 +8,7 @@ from ..hints import pipeline_parameters
  from ..gui.layout import _segmentation_layout
  from ..gui import prompt, prompt_with_help, ask_cancel_segmentation
  from ..interface import open_image
- from ..gui.napari import show_segmentation as napari_show_segmentation
+ from ..gui.napari_visualiser import show_segmentation as napari_show_segmentation
  from .utils import from_label_get_centeroidscoords
  from ._preprocess import ask_input_parameters
  from ._preprocess import map_channels, reorder_shape, reorder_image_stack
@@ -12,6 +12,7 @@ def launch_spots_extraction(
  user_parameters,
  image,
  spots,
+ cluster_id,
  nucleus_label,
  cell_label,
  ) :
@@ -19,6 +20,7 @@ def launch_spots_extraction(
  acquisition_id=acquisition_id,
  image=image,
  spots=spots,
+ cluster_id= cluster_id,
  nucleus_label=nucleus_label,
  cell_label=cell_label,
  )
@@ -38,6 +40,7 @@ def compute_Spots(
  acquisition_id : int,
  image : np.ndarray,
  spots : np.ndarray,
+ cluster_id : np.ndarray,
  nucleus_label = None,
  cell_label = None,
  ) :
@@ -45,6 +48,9 @@ def compute_Spots(
  if len(spots) == 0 :
  return pd.DataFrame()

+ if type(cluster_id) == type(None) : #When user doesn't select cluster
+ cluster_id = [np.NaN]*len(spots)
+
  index = list(zip(*spots))
  index = tuple(index)
  spot_intensities_list = list(image[index])
@@ -67,6 +73,7 @@ def compute_Spots(
  'cell_label' : cell_label_list,
  'in_nucleus' : in_nuc_list,
  'coordinates' : coord_list,
+ 'cluster_id' : cluster_id,
  })

  return Spots