celldetective 1.1.1.post1__py3-none-any.whl → 1.1.1.post4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
celldetective/io.py CHANGED
@@ -26,6 +26,7 @@ from stardist import fill_label_holes
 from celldetective.utils import interpolate_nan
 from scipy.interpolate import griddata
 
+
 def get_experiment_wells(experiment):
 
     """
@@ -801,6 +802,10 @@ def auto_load_number_of_frames(stack_path):
     """
 
     # Try to estimate automatically # frames
+
+    if stack_path is None:
+        return None
+
     stack_path = stack_path.replace('\\','/')
 
     with TiffFile(stack_path) as tif:
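The new guard lets auto_load_number_of_frames return None when no stack path is available, instead of failing on stack_path.replace. A minimal sketch of the new contract (the path below is hypothetical):

    from celldetective.io import auto_load_number_of_frames

    assert auto_load_number_of_frames(None) is None   # previously raised AttributeError
    n = auto_load_number_of_frames('some/position/movie/Aligned.tif')  # still read from the TIFF, as before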
@@ -1370,6 +1375,14 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
     exp_name = os.path.split(expfolder)[-1]
     print(exp_name)
 
+    wells = get_experiment_wells(expfolder)
+    well_idx = list(wells).index(str(parent1)+os.sep)
+    ab = get_experiment_antibodies(expfolder)[well_idx]
+    conc = get_experiment_concentrations(expfolder)[well_idx]
+    ct = get_experiment_cell_types(expfolder)[well_idx]
+    pa = get_experiment_pharmaceutical_agents(expfolder)[well_idx]
+
+
     spatial_calibration = float(ConfigSectionMap(config,"MovieSettings")["pxtoum"])
     channel_names, channel_indices = extract_experiment_channels(config)
 
@@ -1424,7 +1437,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
                 multichannel = np.array(multichannel)
                 save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}_labelled.tif", labels_layer[xmin:xmax,ymin:ymax].astype(np.int16), axes='YX')
                 save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}.tif", multichannel, axes='CYX')
-                info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names)}
+                info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names), 'cell_type': ct, 'antibody': ab, 'concentration': conc, 'pharmaceutical_agent': pa}
                 info_name = annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}.json"
                 with open(info_name, 'w') as f:
                     json.dump(info, f, indent=4)
@@ -1441,7 +1454,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
                 multichannel = np.array(multichannel)
                 save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_labelled.tif", labels_layer, axes='YX')
                 save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}.tif", multichannel, axes='CYX')
-                info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names)}
+                info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names), 'cell_type': ct, 'antibody': ab, 'concentration': conc, 'pharmaceutical_agent': pa}
                 info_name = annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}.json"
                 with open(info_name, 'w') as f:
                     json.dump(info, f, indent=4)
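With the well metadata recovered above, every exported annotation now carries its experimental context in the JSON sidecar. A sketch of the resulting file, with made-up values:

    {
        "spatial_calibration": 0.3112,
        "channels": ["brightfield", "fluo"],
        "cell_type": "MCF7",
        "antibody": "anti-CD8",
        "concentration": 100,
        "pharmaceutical_agent": "none"
    }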
@@ -1481,6 +1494,59 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
     del labels
     gc.collect()
 
+def correct_annotation(filename):
+
+    """
+    New function to reannotate an annotation image in post, using napari and save update inplace.
+    """
+
+    def export_labels():
+        labels_layer = viewer.layers['segmentation'].data
+        for t,im in enumerate(tqdm(labels_layer)):
+
+            try:
+                im = auto_correct_masks(im)
+            except Exception as e:
+                print(e)
+
+            save_tiff_imagej_compatible(existing_lbl, im.astype(np.int16), axes='YX')
+        print("The labels have been successfully rewritten.")
+
+    @magicgui(call_button='Save the modified labels')
+    def save_widget():
+        return export_labels()
+
+    img = imread(filename.replace('\\','/'))
+    if img.ndim==3:
+        img = np.moveaxis(img, 0, -1)
+    elif img.ndim==2:
+        img = img[:,:,np.newaxis]
+
+    existing_lbl = filename.replace('.tif','_labelled.tif')
+    if os.path.exists(existing_lbl):
+        labels = imread(existing_lbl)[np.newaxis,:,:].astype(int)
+    else:
+        labels = np.zeros_like(img[:,:,0]).astype(int)[np.newaxis,:,:]
+
+    stack = img[np.newaxis,:,:,:]
+
+    viewer = napari.Viewer()
+    viewer.add_image(stack,channel_axis=-1,colormap=["gray"]*stack.shape[-1])
+    viewer.add_labels(labels, name='segmentation',opacity=0.4)
+    viewer.window.add_dock_widget(save_widget, area='right')
+    viewer.show(block=True)
+
+    # temporary fix for slight napari memory leak
+    for i in range(100):
+        try:
+            viewer.layers.pop()
+        except:
+            pass
+    del viewer
+    del stack
+    del labels
+    gc.collect()
+
 
 def _view_on_napari(tracks=None, stack=None, labels=None):
 
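correct_annotation pairs an exported annotation image with its mask and rewrites the mask in place once the dock-widget button is pressed. A sketch of the file pairing it assumes (hypothetical name):

    filename = 'ExpName_100_0000.tif'                          # multichannel annotation image
    existing_lbl = filename.replace('.tif', '_labelled.tif')   # loaded if present, else a blank mask
    # after editing in napari, 'Save the modified labels' overwrites existing_lbl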
@@ -1628,7 +1694,7 @@ def locate_segmentation_model(name):
     """
 
     main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0],"celldetective"])
-    modelpath = os.sep.join([main_dir, "models", "segmentation*", os.sep])
+    modelpath = os.sep.join([main_dir, "models", "segmentation*"]) + os.sep
     print(f'Looking for {name} in {modelpath}')
     models = glob(modelpath+f'*{os.sep}')
 
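The modelpath fix removes a doubled trailing separator: os.sep.join inserts a separator before a list element that is itself os.sep. On a POSIX system:

    import os

    os.sep.join(['models', 'segmentation*', os.sep])    # 'models/segmentation*//'  (old, doubled)
    os.sep.join(['models', 'segmentation*']) + os.sep   # 'models/segmentation*/'   (new)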
@@ -48,14 +48,14 @@ def set_live_status(setA,setB,status, not_status_option):
 
     """
 
-
-    if status is None:
+    print(f"Provided statuses: {status}...")
+    if status is None or status==["live_status","live_status"] or status==[None,None]:
         setA.loc[:,'live_status'] = 1
         setB.loc[:,'live_status'] = 1
         status = ['live_status', 'live_status']
     elif isinstance(status,list):
         assert len(status)==2,'Please provide only two columns to classify cells as alive or dead.'
-        if status[0] is None:
+        if status[0] is None or status[0]=='live_status':
             setA.loc[:,'live_status'] = 1
             status[0] = 'live_status'
         elif status[0] is not None and isinstance(not_status_option,list):
@@ -63,7 +63,7 @@ def set_live_status(setA,setB,status, not_status_option):
             if not_status_option[0]:
                 setA.loc[:,'not_'+status[0]] = [not a if a==0 or a==1 else np.nan for a in setA.loc[:,status[0]].values]
                 status[0] = 'not_'+status[0]
-        if status[1] is None:
+        if status[1] is None or status[1]=='live_status':
             setB.loc[:,'live_status'] = 1
             status[1] = 'live_status'
         elif status[1] is not None and isinstance(not_status_option,list):
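set_live_status now also recognizes the sentinels it produces itself: [None, None] and ['live_status', 'live_status'] are normalized exactly like status=None, rather than being looked up as real columns on a second pass. A minimal sketch, assuming two toy tables:

    import pandas as pd

    setA = pd.DataFrame({'TRACK_ID': [0], 'FRAME': [0]})
    setB = setA.copy()

    # each call now takes the "everyone alive" default branch, leaving
    # setA['live_status'] == setB['live_status'] == 1:
    # set_live_status(setA, setB, None, None)
    # set_live_status(setA, setB, [None, None], None)
    # set_live_status(setA, setB, ['live_status', 'live_status'], None)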
@@ -380,16 +380,39 @@ def compute_neighborhood_at_position(pos, distance, population=['targets','effec
     if df_A_pkl is not None:
         pkl_columns = np.array(df_A_pkl.columns)
         neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
-        cols = list(pkl_columns[neigh_columns]) + ['TRACK_ID','FRAME']
+        cols = list(pkl_columns[neigh_columns]) + ['FRAME']
+
+        if 'TRACK_ID' in list(pkl_columns):
+            cols.append('TRACK_ID')
+            on_cols = ['TRACK_ID','FRAME']
+        else:
+            cols.append('ID')
+            on_cols = ['ID','FRAME']
+
         print(f'Recover {cols} from the pickle file...')
-        df_A = pd.merge(df_A, df_A_pkl.loc[:,cols], how="outer", on=['TRACK_ID','FRAME'])
-        print(df_A.columns)
+        try:
+            df_A = pd.merge(df_A, df_A_pkl.loc[:,cols], how="outer", on=on_cols)
+            print(df_A.columns)
+        except Exception as e:
+            print(f'Failure to merge pickle and csv files: {e}')
+
 
     if df_B_pkl is not None and df_B is not None:
         pkl_columns = np.array(df_B_pkl.columns)
         neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
-        cols = list(pkl_columns[neigh_columns]) + ['TRACK_ID','FRAME']
+        cols = list(pkl_columns[neigh_columns]) + ['FRAME']
+
+        if 'TRACK_ID' in list(pkl_columns):
+            cols.append('TRACK_ID')
+            on_cols = ['TRACK_ID','FRAME']
+        else:
+            cols.append('ID')
+            on_cols = ['ID','FRAME']
+
         print(f'Recover {cols} from the pickle file...')
-        df_B = pd.merge(df_B, df_B_pkl.loc[:,cols], how="outer", on=['TRACK_ID','FRAME'])
+        try:
+            df_B = pd.merge(df_B, df_B_pkl.loc[:,cols], how="outer", on=on_cols)
+        except Exception as e:
+            print(f'Failure to merge pickle and csv files: {e}')
 
     if clear_neigh:
         unwanted = df_A.columns[df_A.columns.str.contains('neighborhood')]
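The merge key now adapts to whether the tables are tracked: tracked tables join on TRACK_ID/FRAME, detection-only tables on ID/FRAME, and a failed merge is logged instead of aborting the computation. The same logic in isolation (merge_with_pickle is only an illustration, not a function of the package):

    import pandas as pd

    def merge_with_pickle(df, df_pkl, neigh_cols):
        # join on TRACK_ID when available, fall back to per-detection IDs
        on_cols = ['TRACK_ID', 'FRAME'] if 'TRACK_ID' in df_pkl.columns else ['ID', 'FRAME']
        try:
            return pd.merge(df, df_pkl.loc[:, neigh_cols + on_cols], how="outer", on=on_cols)
        except Exception as e:
            print(f'Failure to merge pickle and csv files: {e}')
            return df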
@@ -408,17 +431,22 @@ def compute_neighborhood_at_position(pos, distance, population=['targets','effec
         elif neighborhood_kwargs['mode']=='self':
             neigh_col = f'neighborhood_self_circle_{d}_px'
 
-        edge_filter_A = (df_A['POSITION_X'] > td)&(df_A['POSITION_Y'] > td)&(df_A['POSITION_Y'] < (img_shape[0] - td))&(df_A['POSITION_X'] < (img_shape[1] - td))
-        edge_filter_B = (df_B['POSITION_X'] > td)&(df_B['POSITION_Y'] > td)&(df_B['POSITION_Y'] < (img_shape[0] - td))&(df_B['POSITION_X'] < (img_shape[1] - td))
-        df_A.loc[~edge_filter_A, neigh_col] = np.nan
-        df_B.loc[~edge_filter_B, neigh_col] = np.nan
+        # edge_filter_A = (df_A['POSITION_X'] > td)&(df_A['POSITION_Y'] > td)&(df_A['POSITION_Y'] < (img_shape[0] - td))&(df_A['POSITION_X'] < (img_shape[1] - td))
+        # edge_filter_B = (df_B['POSITION_X'] > td)&(df_B['POSITION_Y'] > td)&(df_B['POSITION_Y'] < (img_shape[0] - td))&(df_B['POSITION_X'] < (img_shape[1] - td))
+        # df_A.loc[~edge_filter_A, neigh_col] = np.nan
+        # df_B.loc[~edge_filter_B, neigh_col] = np.nan
 
+        print('Count neighborhood...')
         df_A = compute_neighborhood_metrics(df_A, neigh_col, metrics=['inclusive','exclusive','intermediate'], decompose_by_status=True)
         if neighborhood_kwargs['symmetrize']:
             df_B = compute_neighborhood_metrics(df_B, neigh_col, metrics=['inclusive','exclusive','intermediate'], decompose_by_status=True)
+        print('Done...')
 
-        df_A = mean_neighborhood_before_event(df_A, neigh_col, event_time_col)
-        df_A = mean_neighborhood_after_event(df_A, neigh_col, event_time_col)
+        if 'TRACK_ID' in list(df_A.columns):
+            print('Estimate average neighborhood before/after event...')
+            df_A = mean_neighborhood_before_event(df_A, neigh_col, event_time_col)
+            df_A = mean_neighborhood_after_event(df_A, neigh_col, event_time_col)
+            print('Done...')
 
     df_A.to_pickle(path_A.replace('.csv','.pkl'))
     if not population[0]==population[1]:
@@ -485,9 +513,14 @@ def compute_neighborhood_metrics(neigh_table, neigh_col, metrics=['inclusive','e
 
     neigh_table = neigh_table.reset_index(drop=True)
     if 'position' in list(neigh_table.columns):
-        groupbycols = ['position','TRACK_ID']
+        groupbycols = ['position']
     else:
-        groupbycols = ['TRACK_ID']
+        groupbycols = []
+    if 'TRACK_ID' in list(neigh_table.columns):
+        groupbycols.append('TRACK_ID')
+    else:
+        groupbycols.append('ID')
+
     neigh_table.sort_values(by=groupbycols+['FRAME'],inplace=True)
 
     for tid,group in neigh_table.groupby(groupbycols):
@@ -607,10 +640,16 @@ def mean_neighborhood_before_event(neigh_table, neigh_col, event_time_col, metri
     """
 
 
+    neigh_table = neigh_table.reset_index(drop=True)
     if 'position' in list(neigh_table.columns):
-        groupbycols = ['position','TRACK_ID']
+        groupbycols = ['position']
     else:
-        groupbycols = ['TRACK_ID']
+        groupbycols = []
+    if 'TRACK_ID' in list(neigh_table.columns):
+        groupbycols.append('TRACK_ID')
+    else:
+        groupbycols.append('ID')
+
     neigh_table.sort_values(by=groupbycols+['FRAME'],inplace=True)
     suffix = '_before_event'
 
@@ -681,10 +720,16 @@ def mean_neighborhood_after_event(neigh_table, neigh_col, event_time_col, metric
     """
 
 
+    neigh_table = neigh_table.reset_index(drop=True)
     if 'position' in list(neigh_table.columns):
-        groupbycols = ['position','TRACK_ID']
+        groupbycols = ['position']
+    else:
+        groupbycols = []
+    if 'TRACK_ID' in list(neigh_table.columns):
+        groupbycols.append('TRACK_ID')
     else:
-        groupbycols = ['TRACK_ID']
+        groupbycols.append('ID')
+
     neigh_table.sort_values(by=groupbycols+['FRAME'],inplace=True)
     suffix = '_after_event'
 
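All three neighborhood metric functions now assemble their groupby key dynamically, so untracked tables (which carry ID instead of TRACK_ID) are grouped per detection instead of raising a KeyError. The shared pattern:

    groupbycols = ['position'] if 'position' in neigh_table.columns else []
    groupbycols.append('TRACK_ID' if 'TRACK_ID' in neigh_table.columns else 'ID')
    neigh_table = neigh_table.sort_values(by=groupbycols + ['FRAME'])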
@@ -1096,6 +1141,8 @@ def compute_contact_neighborhood_at_position(pos, distance, population=['targets
 
     df_A, path_A = get_position_table(pos, population=population[0], return_path=True)
     df_B, path_B = get_position_table(pos, population=population[1], return_path=True)
+    if df_A is None or df_B is None:
+        return None
 
     df_A_pkl = get_position_pickle(pos, population=population[0], return_path=False)
     df_B_pkl = get_position_pickle(pos, population=population[1], return_path=False)
@@ -1103,16 +1150,39 @@ def compute_contact_neighborhood_at_position(pos, distance, population=['targets
     if df_A_pkl is not None:
         pkl_columns = np.array(df_A_pkl.columns)
         neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
-        cols = list(pkl_columns[neigh_columns]) + ['TRACK_ID','FRAME']
+        cols = list(pkl_columns[neigh_columns]) + ['FRAME']
+
+        if 'TRACK_ID' in list(pkl_columns):
+            cols.append('TRACK_ID')
+            on_cols = ['TRACK_ID','FRAME']
+        else:
+            cols.append('ID')
+            on_cols = ['ID','FRAME']
+
         print(f'Recover {cols} from the pickle file...')
-        df_A = pd.merge(df_A, df_A_pkl.loc[:,cols], how="outer", on=['TRACK_ID','FRAME'])
-        print(df_A.columns)
+        try:
+            df_A = pd.merge(df_A, df_A_pkl.loc[:,cols], how="outer", on=on_cols)
+            print(df_A.columns)
+        except Exception as e:
+            print(f'Failure to merge pickle and csv files: {e}')
+
 
     if df_B_pkl is not None and df_B is not None:
         pkl_columns = np.array(df_B_pkl.columns)
-        cols = list(pkl_columns[neigh_columns]) + ['TRACK_ID','FRAME']
+        cols = list(pkl_columns[neigh_columns]) + ['FRAME']
+
+        if 'TRACK_ID' in list(pkl_columns):
+            cols.append('TRACK_ID')
+            on_cols = ['TRACK_ID','FRAME']
+        else:
+            cols.append('ID')
+            on_cols = ['ID','FRAME']
+
         print(f'Recover {cols} from the pickle file...')
-        df_B = pd.merge(df_B, df_B_pkl.loc[:,cols], how="outer", on=['TRACK_ID','FRAME'])
+        try:
+            df_B = pd.merge(df_B, df_B_pkl.loc[:,cols], how="outer", on=on_cols)
+        except Exception as e:
+            print(f'Failure to merge pickle and csv files: {e}')
 
     labelsA = locate_labels(pos, population=population[0])
     if population[1]==population[0]:
@@ -69,6 +69,7 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio
     ... print(bg["well"], bg["bg"].shape)
     """
 
+
     config = get_config(experiment)
     wells = get_experiment_wells(experiment)
     len_movie = float(ConfigSectionMap(config,"MovieSettings")["len_movie"])
@@ -79,7 +80,7 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio
     channel_indices = _extract_channel_indices_from_config(config, [target_channel])
     nbr_channels = _extract_nbr_channels_from_config(config)
     img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
-
+    
     backgrounds = []
 
     for k, well_path in enumerate(tqdm(wells[well_indices], disable=not show_progress_per_well)):
@@ -95,51 +96,65 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio
         for l,pos_path in enumerate(tqdm(positions, disable=not show_progress_per_pos)):
 
             stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
+            if stack_path is not None:
+                len_movie_auto = auto_load_number_of_frames(stack_path)
+                if len_movie_auto is not None:
+                    len_movie = len_movie_auto
+                    img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
 
-            if mode=="timeseries":
-
-                frames = load_frames(img_num_channels[0,frame_range[0]:frame_range[1]], stack_path, normalize_input=False)
-                frames = np.moveaxis(frames, -1, 0).astype(float)
-
-                for i in range(len(frames)):
-                    if np.all(frames[i].flatten()==0):
-                        frames[i,:,:] = np.nan
-
-                frame_mean = np.nanmean(frames, axis=0)
-
-                frame = frame_mean.copy().astype(float)
-                std_frame = filter_image(frame.copy(),filters=activation_protocol)
-                edge = estimate_unreliable_edge(activation_protocol)
-                mask = threshold_image(std_frame, threshold_on_std, 1.0E06, foreground_value=1, edge_exclusion=edge)
-                frame[np.where(mask.astype(int)==1)] = np.nan
-
-            elif mode=="tiles":
-
-                frames = load_frames(img_num_channels[0,:], stack_path, normalize_input=False).astype(float)
-                frames = np.moveaxis(frames, -1, 0).astype(float)
+                if mode=="timeseries":
 
-                for i in range(len(frames)):
+                    frames = load_frames(img_num_channels[0,frame_range[0]:frame_range[1]], stack_path, normalize_input=False)
+                    frames = np.moveaxis(frames, -1, 0).astype(float)
 
-                    if np.all(frames[i].flatten()==0):
-                        frames[i,:,:] = np.nan
-                        continue
+                    for i in range(len(frames)):
+                        if np.all(frames[i].flatten()==0):
+                            frames[i,:,:] = np.nan
 
-                    f = frames[i].copy()
-                    std_frame = filter_image(f.copy(),filters=activation_protocol)
+                    frame_mean = np.nanmean(frames, axis=0)
+
+                    frame = frame_mean.copy().astype(float)
+                    std_frame = filter_image(frame.copy(),filters=activation_protocol)
                     edge = estimate_unreliable_edge(activation_protocol)
-                    mask = threshold_image(std_frame, threshold_on_std, 1.0E06, foreground_value=1, edge_exclusion=edge)
-                    f[np.where(mask.astype(int)==1)] = np.nan
-
-                    frames[i,:,:] = f
-
-                frame = np.nanmedian(frames, axis=0)
+                    mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge)
+                    frame[np.where(mask.astype(int)==1)] = np.nan
+
+                elif mode=="tiles":
+
+                    frames = load_frames(img_num_channels[0,:], stack_path, normalize_input=False).astype(float)
+                    frames = np.moveaxis(frames, -1, 0).astype(float)
+
+                    new_frames = []
+                    for i in range(len(frames)):
+
+                        if np.all(frames[i].flatten()==0):
+                            empty_frame = np.zeros_like(frames[i])
+                            empty_frame[:,:] = np.nan
+                            new_frames.append(empty_frame)
+                            continue
+
+                        f = frames[i].copy()
+                        std_frame = filter_image(f.copy(),filters=activation_protocol)
+                        edge = estimate_unreliable_edge(activation_protocol)
+                        mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge)
+                        f[np.where(mask.astype(int)==1)] = np.nan
+                        new_frames.append(f.copy())
+
+                    frame = np.nanmedian(new_frames, axis=0)
+            else:
+                print(f'Stack not found for position {pos_path}...')
+                frame = []
 
             # store
             frame_mean_per_position.append(frame)
 
-        background = np.nanmedian(frame_mean_per_position,axis=0)
-        backgrounds.append({"bg": background, "well": well_path})
-        print(f"Background successfully computed for well {well_name}...")
+        try:
+            background = np.nanmedian(frame_mean_per_position,axis=0)
+            backgrounds.append({"bg": background, "well": well_path})
+            print(f"Background successfully computed for well {well_name}...")
+        except Exception as e:
+            print(e)
+            backgrounds.append(None)
 
     return backgrounds
 
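Several call sites replace the hard-coded 1.0E06 upper bound with np.inf. Assuming threshold_image(frame, min_value, max_value, ...) marks pixels inside [min_value, max_value) as foreground, the old bound silently excluded extremely bright pixels from the cell mask:

    import numpy as np

    std_frame = np.array([0.5, 2.0, 5.0e6])
    mask_old = (std_frame > 1.0) & (std_frame < 1.0e6)   # array([False,  True, False])
    mask_new = (std_frame > 1.0) & (std_frame < np.inf)  # array([False,  True,  True])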
@@ -261,28 +276,35 @@ def correct_background_model_free(
 
         stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
         print(f'Applying the correction to position {extract_position_name(pos_path)}...')
-
-        corrected_stack = apply_background_to_stack(stack_path,
-                                                    background,
-                                                    target_channel_index=channel_indices[0],
-                                                    nbr_channels=nbr_channels,
-                                                    stack_length=len_movie,
-                                                    threshold_on_std=threshold_on_std,
-                                                    optimize_option=optimize_option,
-                                                    opt_coef_range=opt_coef_range,
-                                                    opt_coef_nbr=opt_coef_nbr,
-                                                    operation=operation,
-                                                    clip=clip,
-                                                    export=export,
-                                                    activation_protocol=activation_protocol,
-                                                    prefix=export_prefix,
-                                                    )
-        print('Correction successful.')
-        if return_stacks:
-            stacks.append(corrected_stack)
+        if stack_path is not None:
+            len_movie_auto = auto_load_number_of_frames(stack_path)
+            if len_movie_auto is not None:
+                len_movie = len_movie_auto
+                img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
+
+            corrected_stack = apply_background_to_stack(stack_path,
+                                                        background,
+                                                        target_channel_index=channel_indices[0],
+                                                        nbr_channels=nbr_channels,
+                                                        stack_length=len_movie,
+                                                        threshold_on_std=threshold_on_std,
+                                                        optimize_option=optimize_option,
+                                                        opt_coef_range=opt_coef_range,
+                                                        opt_coef_nbr=opt_coef_nbr,
+                                                        operation=operation,
+                                                        clip=clip,
+                                                        export=export,
+                                                        activation_protocol=activation_protocol,
+                                                        prefix=export_prefix,
+                                                        )
+            print('Correction successful.')
+            if return_stacks:
+                stacks.append(corrected_stack)
+            else:
+                del corrected_stack
+                collect()
         else:
-            del corrected_stack
-            collect()
+            stacks.append(None)
 
     if return_stacks:
         return stacks
@@ -371,18 +393,25 @@ def apply_background_to_stack(stack_path, background, target_channel_index=0, nb
 
         std_frame = filter_image(target_copy.copy(),filters=activation_protocol)
         edge = estimate_unreliable_edge(activation_protocol)
-        mask = threshold_image(std_frame, threshold_on_std, 1.0E06, foreground_value=1, edge_exclusion=edge)
+        mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge)
         target_copy[np.where(mask.astype(int)==1)] = np.nan
 
         loss = []
 
         # brute-force regression, could do gradient descent instead
         for c in coefficients:
+
             target_crop = unpad(target_copy,edge)
             bg_crop = unpad(background, edge)
-            diff = np.subtract(target_crop, c*bg_crop, where=target_crop==target_crop)
-            s = np.sum(np.abs(diff, where=diff==diff), where=diff==diff)
+
+            roi = np.zeros_like(target_crop).astype(int)
+            roi[target_crop!=target_crop] = 1
+            roi[bg_crop!=bg_crop] = 1
+
+            diff = np.subtract(target_crop, c*bg_crop, where=roi==0)
+            s = np.sum(np.abs(diff, where=roi==0), where=roi==0)
             loss.append(s)
+
 
         c = coefficients[np.argmin(loss)]
         print(f"Frame: {i}; optimal coefficient: {c}...")
@@ -758,7 +787,10 @@ def correct_background_model(
 
         stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
         print(f'Applying the correction to position {extract_position_name(pos_path)}...')
-        print(stack_path)
+        len_movie_auto = auto_load_number_of_frames(stack_path)
+        if len_movie_auto is not None:
+            len_movie = len_movie_auto
+            img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
 
         corrected_stack = fit_and_apply_model_background_to_stack(stack_path,
                                                                   target_channel_index=channel_indices[0],
@@ -954,7 +986,7 @@ def field_correction(img, threshold_on_std=1, operation='divide', model='parabol
 
     std_frame = filter_image(target_copy,filters=activation_protocol)
     edge = estimate_unreliable_edge(activation_protocol)
-    mask = threshold_image(std_frame, threshold_on_std, 1.0E06, foreground_value=1, edge_exclusion=edge).astype(int)
+    mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge).astype(int)
     background = fit_background_model(img, cell_masks=mask, model=model, edge_exclusion=edge)
 
     if operation=="divide":
@@ -148,6 +148,7 @@ def segment_index(indices):
         model.config.use_gpu = use_gpu
         model.use_gpu = use_gpu
         print(f"StarDist model {modelname} successfully loaded.")
+        scale_model = scale
 
     elif model_type=='cellpose':
 
@@ -60,17 +60,10 @@ batch_size = training_instructions['batch_size']
 
 # Load dataset
 print(f'Datasets: {datasets}')
-X,Y = load_image_dataset(datasets, target_channels, train_spatial_calibration=spatial_calibration,
+X,Y,filenames = load_image_dataset(datasets, target_channels, train_spatial_calibration=spatial_calibration,
                          mask_suffix='labelled')
 print('Dataset loaded...')
 
-# Normalize images
-# X = normalize_per_channel(X,
-# normalization_percentile_mode=normalization_percentile,
-# normalization_values=normalization_values,
-# normalization_clipping=normalization_clip
-# )
-
 values = []
 percentiles = []
 for k in range(len(normalization_percentile)):
@@ -88,17 +81,6 @@ for k in range(len(X)):
     x_interp = np.moveaxis([interpolate_nan(x[:,:,c].copy()) for c in range(x.shape[-1])],0,-1)
     X[k] = x_interp
 
-# for x in X[:10]:
-# plt.imshow(x[:,:,0])
-# plt.colorbar()
-# plt.pause(2)
-# plt.close()
-
-# plt.imshow(x[:,:,1])
-# plt.colorbar()
-# plt.pause(2)
-# plt.close()
-
 Y = [fill_label_holes(y) for y in tqdm(Y)]
 
 assert len(X) > 1, "not enough training data"
@@ -107,7 +89,11 @@ ind = rng.permutation(len(X))
 n_val = max(1, int(round(validation_split * len(ind))))
 ind_train, ind_val = ind[:-n_val], ind[-n_val:]
 X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]
-X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] 
+X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]
+
+files_train = [filenames[i] for i in ind_train]
+files_val = [filenames[i] for i in ind_val]
+
 print('number of images: %3d' % len(X))
 print('- training: %3d' % len(X_trn))
 print('- validation: %3d' % len(X_val))
@@ -134,7 +120,10 @@ if model_type=='cellpose':
     import torch
 
     if not use_gpu:
+        print('Using CPU for training...')
         device = torch.device("cpu")
+    else:
+        print('Using GPU for training...')
 
     logger, log_file = logger_setup()
     print(f'Pretrained model: ',pretrained)
@@ -163,7 +152,7 @@ if model_type=='cellpose':
     config_inputs = {"channels": target_channels, "diameter": standard_diameter, 'cellprob_threshold': 0., 'flow_threshold': 0.4,
                      'normalization_percentile': normalization_percentile, 'normalization_clip': normalization_clip,
                      'normalization_values': normalization_values, 'model_type': 'cellpose',
-                     'spatial_calibration': input_spatial_calibration}
+                     'spatial_calibration': input_spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
     json_input_config = json.dumps(config_inputs, indent=4)
     with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
         outfile.write(json_input_config)
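This branch (and the StarDist branch in the next hunk) now records the train/validation split in the model's config_input.json, so a trained model documents its own dataset. A sketch of the added key, with hypothetical filenames:

    {
        "model_type": "cellpose",
        "spatial_calibration": 0.5,
        "dataset": {
            "train": ["image_0001.tif", "image_0002.tif"],
            "validation": ["image_0042.tif"]
        }
    }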
@@ -234,7 +223,7 @@ elif model_type=='stardist':
 
     config_inputs = {"channels": target_channels, 'normalization_percentile': normalization_percentile,
                      'normalization_clip': normalization_clip, 'normalization_values': normalization_values,
-                     'model_type': 'stardist', 'spatial_calibration': spatial_calibration}
+                     'model_type': 'stardist', 'spatial_calibration': spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
     json_input_config = json.dumps(config_inputs, indent=4)
     with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
         outfile.write(json_input_config)