celldetective-1.3.9.post5-py3-none-any.whl → celldetective-1.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. celldetective/__init__.py +0 -3
  2. celldetective/_version.py +1 -1
  3. celldetective/events.py +2 -4
  4. celldetective/exceptions.py +11 -0
  5. celldetective/extra_properties.py +132 -0
  6. celldetective/filters.py +7 -1
  7. celldetective/gui/InitWindow.py +37 -46
  8. celldetective/gui/__init__.py +3 -9
  9. celldetective/gui/about.py +19 -15
  10. celldetective/gui/analyze_block.py +34 -19
  11. celldetective/gui/base_annotator.py +786 -0
  12. celldetective/gui/base_components.py +23 -0
  13. celldetective/gui/classifier_widget.py +86 -94
  14. celldetective/gui/configure_new_exp.py +163 -46
  15. celldetective/gui/control_panel.py +76 -146
  16. celldetective/gui/{signal_annotator.py → event_annotator.py} +533 -1438
  17. celldetective/gui/generic_signal_plot.py +11 -13
  18. celldetective/gui/gui_utils.py +54 -23
  19. celldetective/gui/help/neighborhood.json +2 -2
  20. celldetective/gui/json_readers.py +5 -4
  21. celldetective/gui/layouts.py +265 -31
  22. celldetective/gui/{signal_annotator2.py → pair_event_annotator.py} +433 -635
  23. celldetective/gui/plot_measurements.py +21 -17
  24. celldetective/gui/plot_signals_ui.py +125 -72
  25. celldetective/gui/process_block.py +283 -188
  26. celldetective/gui/processes/compute_neighborhood.py +594 -0
  27. celldetective/gui/processes/downloader.py +37 -34
  28. celldetective/gui/processes/measure_cells.py +19 -8
  29. celldetective/gui/processes/segment_cells.py +47 -11
  30. celldetective/gui/processes/track_cells.py +18 -13
  31. celldetective/gui/seg_model_loader.py +21 -62
  32. celldetective/gui/settings/__init__.py +7 -0
  33. celldetective/gui/settings/_settings_base.py +70 -0
  34. celldetective/gui/{retrain_signal_model_options.py → settings/_settings_event_model_training.py} +54 -109
  35. celldetective/gui/{measurement_options.py → settings/_settings_measurements.py} +54 -92
  36. celldetective/gui/{neighborhood_options.py → settings/_settings_neighborhood.py} +10 -13
  37. celldetective/gui/settings/_settings_segmentation.py +49 -0
  38. celldetective/gui/{retrain_segmentation_model_options.py → settings/_settings_segmentation_model_training.py} +38 -92
  39. celldetective/gui/{signal_annotator_options.py → settings/_settings_signal_annotator.py} +78 -103
  40. celldetective/gui/{btrack_options.py → settings/_settings_tracking.py} +85 -116
  41. celldetective/gui/styles.py +2 -1
  42. celldetective/gui/survival_ui.py +49 -95
  43. celldetective/gui/tableUI.py +53 -25
  44. celldetective/gui/table_ops/__init__.py +0 -0
  45. celldetective/gui/table_ops/merge_groups.py +118 -0
  46. celldetective/gui/thresholds_gui.py +617 -1221
  47. celldetective/gui/viewers.py +107 -42
  48. celldetective/gui/workers.py +8 -4
  49. celldetective/io.py +137 -57
  50. celldetective/links/zenodo.json +145 -144
  51. celldetective/measure.py +94 -53
  52. celldetective/neighborhood.py +342 -268
  53. celldetective/preprocessing.py +56 -35
  54. celldetective/regionprops/_regionprops.py +16 -5
  55. celldetective/relative_measurements.py +50 -29
  56. celldetective/scripts/analyze_signals.py +4 -1
  57. celldetective/scripts/measure_cells.py +5 -5
  58. celldetective/scripts/measure_relative.py +20 -12
  59. celldetective/scripts/segment_cells.py +4 -10
  60. celldetective/scripts/segment_cells_thresholds.py +3 -3
  61. celldetective/scripts/track_cells.py +10 -8
  62. celldetective/scripts/train_segmentation_model.py +18 -6
  63. celldetective/signals.py +29 -14
  64. celldetective/tracking.py +14 -3
  65. celldetective/utils.py +91 -62
  66. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/METADATA +24 -16
  67. celldetective-1.4.1.dist-info/RECORD +123 -0
  68. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/WHEEL +1 -1
  69. tests/gui/__init__.py +0 -0
  70. tests/gui/test_new_project.py +228 -0
  71. tests/gui/test_project.py +99 -0
  72. tests/test_preprocessing.py +2 -2
  73. celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +0 -79
  74. celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
  75. celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +0 -37
  76. celldetective/models/segmentation_effectors/test-transfer/config_input.json +0 -39
  77. celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
  78. celldetective/models/signal_detection/NucCond/classification_loss.png +0 -0
  79. celldetective/models/signal_detection/NucCond/classifier.h5 +0 -0
  80. celldetective/models/signal_detection/NucCond/config_input.json +0 -1
  81. celldetective/models/signal_detection/NucCond/log_classifier.csv +0 -126
  82. celldetective/models/signal_detection/NucCond/log_regressor.csv +0 -282
  83. celldetective/models/signal_detection/NucCond/regression_loss.png +0 -0
  84. celldetective/models/signal_detection/NucCond/regressor.h5 +0 -0
  85. celldetective/models/signal_detection/NucCond/scores.npy +0 -0
  86. celldetective/models/signal_detection/NucCond/test_confusion_matrix.png +0 -0
  87. celldetective/models/signal_detection/NucCond/test_regression.png +0 -0
  88. celldetective/models/signal_detection/NucCond/validation_confusion_matrix.png +0 -0
  89. celldetective/models/signal_detection/NucCond/validation_regression.png +0 -0
  90. celldetective-1.3.9.post5.dist-info/RECORD +0 -129
  91. tests/test_qt.py +0 -103
  92. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/entry_points.txt +0 -0
  93. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info/licenses}/LICENSE +0 -0
  94. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/top_level.txt +0 -0
celldetective/io.py CHANGED
@@ -24,12 +24,17 @@ from magicgui import magicgui
  from pathlib import Path, PurePath
  from shutil import copyfile, rmtree
 
- from celldetective.utils import _rearrange_multichannel_frame, _fix_no_contrast, zoom_multiframes,ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
- from celldetective.utils import interpolate_nan_multichannel, _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel, get_config
+ from celldetective.utils import _rearrange_multichannel_frame, _fix_no_contrast, zoom_multiframes, \
+ config_section_to_dict, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, \
+ download_zenodo_file
+ from celldetective.utils import interpolate_nan_multichannel, get_config
 
  from stardist import fill_label_holes
  from skimage.transform import resize
+ import re
 
+ from typing import List, Tuple, Union
+ import numbers
 
  def extract_experiment_from_well(well_path):
 
@@ -202,6 +207,9 @@ def collect_experiment_metadata(pos_path=None, well_path=None):
  if not well_path.endswith(os.sep):
  well_path += os.sep
  experiment = extract_experiment_from_well(well_path)
+ else:
+ print("Please provide a position or well path...")
+ return None
 
  wells = list(get_experiment_wells(experiment))
  idx = wells.index(well_path)
@@ -211,7 +219,14 @@ def collect_experiment_metadata(pos_path=None, well_path=None):
  else:
  pos_name = 0
 
- dico = {"pos_path": pos_path, "position": pos_path, "pos_name": pos_name, "well_path": well_path, "well_name": well_name, "well_nbr": well_nbr, "experiment": experiment}
+ dico = {"pos_path": pos_path,
+ "position": pos_path,
+ "pos_name": pos_name,
+ "well_path": well_path,
+ "well_name": well_name,
+ "well_nbr": well_nbr,
+ "experiment": experiment,
+ }
 
  meta = get_experiment_metadata(experiment) # None or dict of metadata
  if meta is not None:
@@ -306,9 +321,9 @@ def get_spatial_calibration(experiment):
  """
 
  config = get_config(experiment)
- PxToUm = float(ConfigSectionMap(config, "MovieSettings")["pxtoum"])
+ px_to_um = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
 
- return PxToUm
+ return px_to_um
 
 
  def get_temporal_calibration(experiment):
@@ -351,14 +366,14 @@ def get_temporal_calibration(experiment):
  """
 
  config = get_config(experiment)
- FrameToMin = float(ConfigSectionMap(config, "MovieSettings")["frametomin"])
+ frame_to_min = float(config_section_to_dict(config, "MovieSettings")["frametomin"])
 
- return FrameToMin
+ return frame_to_min
 
  def get_experiment_metadata(experiment):
 
  config = get_config(experiment)
- metadata = ConfigSectionMap(config, "Metadata")
+ metadata = config_section_to_dict(config, "Metadata")
  return metadata
 
  def get_experiment_labels(experiment):
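For orientation, a minimal usage sketch (not part of the diff) of the two calibration accessors touched above, which now read the [MovieSettings] section through the renamed config_section_to_dict helper; the experiment path below is a placeholder:

    # Hypothetical sketch: reading the spatial and temporal calibrations of an
    # experiment folder. Assumes the experiment config defines "pxtoum" and
    # "frametomin" under [MovieSettings], as the hunks above show.
    from celldetective.io import get_spatial_calibration, get_temporal_calibration

    experiment = "/path/to/experiment"  # placeholder path
    px_to_um = get_spatial_calibration(experiment)       # pixel size, in µm per pixel
    frame_to_min = get_temporal_calibration(experiment)  # frame interval, in minutes
    print(px_to_um, frame_to_min)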
@@ -367,12 +382,12 @@ def get_experiment_labels(experiment):
  wells = get_experiment_wells(experiment)
  nbr_of_wells = len(wells)
 
- labels = ConfigSectionMap(config, "Labels")
+ labels = config_section_to_dict(config, "Labels")
  for k in list(labels.keys()):
  values = labels[k].split(',')
  if nbr_of_wells != len(values):
  values = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
- if np.all([s.isnumeric() for s in values]):
+ if np.all(np.array([s.isnumeric() for s in values])):
  values = [float(s) for s in values]
  labels.update({k: values})
 
@@ -427,7 +442,7 @@ def get_experiment_concentrations(experiment, dtype=str):
  wells = get_experiment_wells(experiment)
  nbr_of_wells = len(wells)
 
- concentrations = ConfigSectionMap(config, "Labels")["concentrations"].split(",")
+ concentrations = config_section_to_dict(config, "Labels")["concentrations"].split(",")
  if nbr_of_wells != len(concentrations):
  concentrations = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
 
@@ -482,7 +497,7 @@ def get_experiment_cell_types(experiment, dtype=str):
  wells = get_experiment_wells(experiment)
  nbr_of_wells = len(wells)
 
- cell_types = ConfigSectionMap(config, "Labels")["cell_types"].split(",")
+ cell_types = config_section_to_dict(config, "Labels")["cell_types"].split(",")
  if nbr_of_wells != len(cell_types):
  cell_types = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
 
@@ -534,7 +549,7 @@ def get_experiment_antibodies(experiment, dtype=str):
  wells = get_experiment_wells(experiment)
  nbr_of_wells = len(wells)
 
- antibodies = ConfigSectionMap(config, "Labels")["antibodies"].split(",")
+ antibodies = config_section_to_dict(config, "Labels")["antibodies"].split(",")
  if nbr_of_wells != len(antibodies):
  antibodies = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
 
@@ -589,14 +604,25 @@ def get_experiment_pharmaceutical_agents(experiment, dtype=str):
  wells = get_experiment_wells(experiment)
  nbr_of_wells = len(wells)
 
- pharmaceutical_agents = ConfigSectionMap(config, "Labels")["pharmaceutical_agents"].split(",")
+ pharmaceutical_agents = config_section_to_dict(config, "Labels")["pharmaceutical_agents"].split(",")
  if nbr_of_wells != len(pharmaceutical_agents):
  pharmaceutical_agents = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
 
  return np.array([dtype(c) for c in pharmaceutical_agents])
 
 
- def interpret_wells_and_positions(experiment, well_option, position_option):
+ def get_experiment_populations(experiment, dtype=str):
+
+ config = get_config(experiment)
+ populations_str = config_section_to_dict(config, "Populations")
+ if populations_str is not None:
+ populations = populations_str['populations'].split(',')
+ else:
+ populations = ['effectors','targets']
+ return list([dtype(c) for c in populations])
+
+
+ def interpret_wells_and_positions(experiment: str, well_option: Union[str,int,List[int]], position_option: Union[str,int,List[int]]) -> Union[Tuple[List[int], List[int]], None]:
  """
  Interpret well and position options for a given experiment.
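As a rough illustration (not taken from the package documentation), the new get_experiment_populations accessor introduced above could be called as sketched below; it reads an optional [Populations] section of the experiment config and falls back to the historical ['effectors', 'targets'] pair when the section is missing:

    # Hypothetical usage sketch of the accessor added in the hunk above.
    # Assumes an INI-style experiment config that may contain, e.g.:
    #   [Populations]
    #   populations = targets,effectors,nuclei
    from celldetective.io import get_experiment_populations

    experiment = "/path/to/experiment"  # placeholder path
    populations = get_experiment_populations(experiment)
    # -> ['effectors', 'targets'] when no [Populations] section is defined
    print(populations)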
 
@@ -606,8 +632,8 @@ def interpret_wells_and_positions(experiment, well_option, position_option):
 
  Parameters
  ----------
- experiment : object
- The experiment object containing well information.
+ experiment : str
+ The experiment path containing well information.
  well_option : str, int, or list of int
  The well selection option:
  - '*' : Select all wells.
@@ -649,6 +675,9 @@ def interpret_wells_and_positions(experiment, well_option, position_option):
  well_indices = [int(well_option)]
  elif isinstance(well_option, list):
  well_indices = well_option
+ else:
+ print("Well indices could not be interpreted...")
+ return None
 
  if position_option == '*':
  position_indices = None
@@ -656,6 +685,9 @@ def interpret_wells_and_positions(experiment, well_option, position_option):
  position_indices = np.array([position_option], dtype=int)
  elif isinstance(position_option, list):
  position_indices = position_option
+ else:
+ print("Position indices could not be interpreted...")
+ return None
 
  return well_indices, position_indices
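For context, a hedged sketch of how the newly typed interpret_wells_and_positions signature can be consumed; per the docstring, '*' selects everything and an int or list of ints selects specific indices, and the function now returns None when an option cannot be interpreted:

    # Hypothetical sketch based only on the signature and docstring shown above.
    from celldetective.io import interpret_wells_and_positions

    experiment = "/path/to/experiment"  # placeholder path
    selection = interpret_wells_and_positions(experiment, well_option=[0, 2], position_option='*')
    if selection is not None:
        well_indices, position_indices = selection  # position_indices is None when '*' is passed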
 
@@ -784,7 +816,11 @@ def get_position_table(pos, population, return_path=False):
  table = pos + os.sep.join(['output', 'tables', f'trajectories_{population}.csv'])
 
  if os.path.exists(table):
- df_pos = pd.read_csv(table, low_memory=False)
+ try:
+ df_pos = pd.read_csv(table, low_memory=False)
+ except Exception as e:
+ print(e)
+ df_pos = None
  else:
  df_pos = None
 
@@ -959,7 +995,7 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
  config = get_config(experiment)
  wells = get_experiment_wells(experiment)
 
- movie_prefix = ConfigSectionMap(config, "MovieSettings")["movie_prefix"]
+ movie_prefix = config_section_to_dict(config, "MovieSettings")["movie_prefix"]
 
  labels = get_experiment_labels(experiment)
  metadata = get_experiment_metadata(experiment) # None or dict of metadata
@@ -1016,15 +1052,23 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
 
  if metadata is not None:
  keys = list(metadata.keys())
- for k in keys:
- df_pos[k] = metadata[k]
+ for key in keys:
+ df_pos[key] = metadata[key]
 
  df.append(df_pos)
  any_table = True
 
- pos_dict = {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
- 'stack_path': stack_path,'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
- 'well_number': well_number, 'well_alias': well_alias}
+ pos_dict = {'pos_path': pos_path,
+ 'pos_index': real_pos_index,
+ 'pos_name': pos_name,
+ 'table_path': table,
+ 'stack_path': stack_path,
+ 'well_path': well_path,
+ 'well_index': real_well_index,
+ 'well_name': well_name,
+ 'well_number': well_number,
+ 'well_alias': well_alias,
+ }
 
  df_pos_info.append(pos_dict)
 
@@ -1087,7 +1131,9 @@ def locate_stack(position, prefix='Aligned'):
  position += os.sep
 
  stack_path = glob(position + os.sep.join(['movie', f'{prefix}*.tif']))
- assert len(stack_path) > 0, f"No movie with prefix {prefix} found..."
+ if not stack_path:
+ raise FileNotFoundError(f"No movie with prefix {prefix} found...")
+
  stack = imread(stack_path[0].replace('\\', '/'))
  stack_length = auto_load_number_of_frames(stack_path[0])
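Since locate_stack now raises FileNotFoundError instead of failing on an assert when no movie matches the prefix, callers can trap the error explicitly; a minimal sketch (the path is a placeholder and the return value is not shown in this hunk):

    # Hypothetical sketch: handling the FileNotFoundError introduced above.
    from celldetective.io import locate_stack

    try:
        stack_data = locate_stack("/path/to/position", prefix="Aligned")
    except FileNotFoundError as err:
        print(err)  # "No movie with prefix Aligned found..."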
 
@@ -1165,6 +1211,9 @@ def locate_labels(position, population='target', frames=None):
  label_path = natsorted(glob(position + os.sep.join(["labels_targets", "*.tif"])))
  elif population.lower() == "effector" or population.lower() == "effectors":
  label_path = natsorted(glob(position + os.sep.join(["labels_effectors", "*.tif"])))
+ else:
+ label_path = natsorted(glob(position + os.sep.join([f"labels_{population}", "*.tif"])))
+
 
  label_names = [os.path.split(lbl)[-1] for lbl in label_path]
 
@@ -1242,6 +1291,9 @@ def fix_missing_labels(position, population='target', prefix='Aligned'):
  elif population.lower() == "effector" or population.lower() == "effectors":
  label_path = natsorted(glob(position + os.sep.join(["labels_effectors", "*.tif"])))
  path = position + os.sep + "labels_effectors"
+ else:
+ label_path = natsorted(glob(position + os.sep.join([f"labels_{population}", "*.tif"])))
+ path = position + os.sep + f"labels_{population}"
 
  if label_path!=[]:
  #path = os.path.split(label_path[0])[0]
@@ -1348,6 +1400,9 @@ def load_tracking_data(position, prefix="Aligned", population="target"):
  trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', 'trajectories_targets.csv']))
  elif population.lower() == "effector" or population.lower() == "effectors":
  trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', 'trajectories_effectors.csv']))
+ else:
+ trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', f'trajectories_{population}.csv']))
+
 
  stack, labels = locate_stack_and_labels(position, prefix=prefix, population=population)
 
@@ -1397,7 +1452,7 @@ def auto_load_number_of_frames(stack_path):
 
  Handle invalid or missing paths gracefully:
 
- >>> frames = auto_load_number_of_frames(None)
+ >>> frames = auto_load_number_of_frames("stack.tif")
  >>> print(frames)
  None
 
@@ -1500,7 +1555,7 @@ def parse_isotropic_radii(string):
 
  """
 
- sections = re.split(',| ', string)
+ sections = re.split(r"[ ,]", string)
  radii = []
  for k, s in enumerate(sections):
  if s.isdigit():
@@ -2324,7 +2379,7 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
  position : str
  The path to the position or experiment directory.
  prefix : str, optional
- The prefix used to identify the the movie file. The default is "Aligned".
+ The prefix used to identify the movie file. The default is "Aligned".
  population : str, optional
  The population type to load, either "target" or "effector". The default is "target".
 
@@ -2354,6 +2409,11 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
  napari_data = np.load(position+os.sep.join(['output', 'tables', 'napari_effector_trajectories.npy']), allow_pickle=True)
  else:
  napari_data = None
+ else:
+ if os.path.exists(position+os.sep.join(['output', 'tables', f'napari_{population}_trajectories.npy'])):
+ napari_data = np.load(position+os.sep.join(['output', 'tables', f'napari_{population}_trajectories.npy']), allow_pickle=True)
+ else:
+ napari_data = None
 
  if napari_data is not None:
  data = napari_data.item()['data']
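A brief, hedged sketch of load_napari_data, whose population handling is generalized above so that any population name maps to a napari_{population}_trajectories.npy file; the position path is a placeholder and the unpacking mirrors the return statement visible in the next hunk:

    # Hypothetical usage sketch; the exact return values may depend on the
    # (truncated) return_sta... flag in the signature above.
    from celldetective.io import load_napari_data

    data, properties, graph, labels, stack = load_napari_data(
        "/path/to/position", prefix="Aligned", population="target")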
@@ -2371,7 +2431,7 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
  return data, properties, graph, labels, stack
 
 
- def auto_correct_masks(masks, bbox_factor = 1.75, min_area=9, fill_labels=False):
+ def auto_correct_masks(masks, bbox_factor: float = 1.75, min_area: int = 9, fill_labels: bool = False):
 
  """
  Correct segmentation masks to ensure consistency and remove anomalies.
@@ -2391,6 +2451,13 @@ def auto_correct_masks(masks, bbox_factor = 1.75, min_area=9, fill_labels=False)
  masks : np.ndarray
  A 2D array representing the segmented mask image with labeled regions. Each unique value
  in the array represents a different object or cell.
+ bbox_factor : float, optional
+ A factor on cell area that is compared directly to the bounding box area of the cell, to detect remote cells
+ sharing a same label value. The default is `1.75`.
+ min_area : int, optional
+ Discard cells that have an area smaller than this minimum area (px²). The default is `9` (3x3 pixels).
+ fill_labels : bool, optional
+ Fill holes within cell masks automatically. The default is `False`.
 
  Returns
  -------
@@ -2414,6 +2481,8 @@ def auto_correct_masks(masks, bbox_factor = 1.75, min_area=9, fill_labels=False)
  [0, 2, 2, 1],
  [0, 2, 0, 0]])
  """
+
+ assert masks.ndim==2,"`masks` should be a 2D numpy array..."
 
  # Avoid negative mask values
  masks[masks<0] = np.abs(masks[masks<0])
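To make the new docstring parameters concrete, here is a small hypothetical call to auto_correct_masks on a toy 2D label image (the input must now be 2D, per the added assert); the toy array and parameter values are illustrative only:

    # Hypothetical sketch; the array below is not taken from the package docs.
    import numpy as np
    from celldetective.io import auto_correct_masks

    masks = np.array([[0, 1, 1, 0],
                      [0, 2, 2, 1],
                      [0, 2, 0, 0]])
    # min_area is lowered here only because the toy objects are a few pixels large.
    corrected = auto_correct_masks(masks, bbox_factor=1.75, min_area=1, fill_labels=False)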
@@ -2478,6 +2547,8 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  The prefix used to identify the stack. The default is 'Aligned'.
  population : str, optional
  The population type for which the segmentation is performed. The default is 'target'.
+ flush_memory : bool, optional
+ Pop napari layers upon closing the viewer to empty the memory footprint. The default is `False`.
 
  Notes
  -----
@@ -2493,6 +2564,9 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 
  def export_labels():
  labels_layer = viewer.layers['segmentation'].data
+ if not os.path.exists(output_folder):
+ os.mkdir(output_folder)
+
  for t, im in enumerate(tqdm(labels_layer)):
 
  try:
@@ -2531,7 +2605,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  for k in keys:
  info.update({k: metadata_info[k]})
 
- spatial_calibration = float(ConfigSectionMap(config,"MovieSettings")["pxtoum"])
+ spatial_calibration = float(config_section_to_dict(config, "MovieSettings")["pxtoum"])
  channel_names, channel_indices = extract_experiment_channels(expfolder)
 
  annotation_folder = expfolder + os.sep + f'annotations_{population}' + os.sep
@@ -2639,12 +2713,9 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  return export_annotation()
 
  stack, labels = locate_stack_and_labels(position, prefix=prefix, population=population)
-
- if not population.endswith('s'):
- population += 's'
  output_folder = position + f'labels_{population}{os.sep}'
+ print(f"Shape of the loaded image stack: {stack.shape}...")
 
- print(f"{stack.shape}")
  viewer = napari.Viewer()
  viewer.add_image(stack, channel_axis=-1, colormap=["gray"] * stack.shape[-1])
  viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
@@ -2678,6 +2749,8 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  del labels
  gc.collect()
 
+ print("napari viewer was successfully closed...")
+
  def correct_annotation(filename):
 
  """
@@ -2700,6 +2773,12 @@ def correct_annotation(filename):
  def save_widget():
  return export_labels()
 
+ if filename.endswith("_labelled.tif"):
+ filename = filename.replace("_labelled.tif",".tif")
+ if filename.endswith(".json"):
+ filename = filename.replace('.json',".tif")
+ assert os.path.exists(filename),f"Image {filename} does not seem to exist..."
+
  img = imread(filename.replace('\\','/'))
  if img.ndim==3:
  img = np.moveaxis(img, 0, -1)
@@ -2764,7 +2843,7 @@ def _view_on_napari(tracks=None, stack=None, labels=None):
  ... 'x': [10, 20, 30], 'y': [15, 25, 35]})
  >>> stack = np.random.rand(100, 100, 3)
  >>> labels = np.random.randint(0, 2, (100, 100))
- >>> view_on_napari(tracks, stack=stack, labels=labels)
+ >>> _view_on_napari(tracks, stack=stack, labels=labels)
  # Visualize tracks, stack, and labels using Napari.
 
  """
@@ -2827,21 +2906,15 @@ def control_tracking_table(position, calibration=1, prefix="Aligned", population
 
 
  def get_segmentation_models_list(mode='targets', return_path=False):
- if mode == 'targets':
- modelpath = os.sep.join(
- [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_targets", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_targets"]))
- elif mode == 'effectors':
- modelpath = os.sep.join(
- [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_effectors", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_effectors"]))
- elif mode == 'generic':
- modelpath = os.sep.join(
+
+ modelpath = os.sep.join(
  [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_generic", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_generic"]))
+ f"segmentation_{mode}", os.sep])
+ if not os.path.exists(modelpath):
+ os.mkdir(modelpath)
+ repository_models = []
+ else:
+ repository_models = get_zenodo_files(cat=os.sep.join(["models", f"segmentation_{mode}"]))
 
  available_models = natsorted(glob(modelpath + '*/'))
  available_models = [m.replace('\\', '/').split('/')[-2] for m in available_models]
@@ -3162,7 +3235,7 @@ def normalize(frame, percentiles=(0.0,99.99), values=None, ignore_gray_value=0.,
  subframe = frame.copy()
 
  if values is not None:
- mi = values[0];
+ mi = values[0]
  ma = values[1]
  else:
  mi = np.nanpercentile(subframe.flatten(), percentiles[0], keepdims=True)
@@ -3183,9 +3256,14 @@
  return frame.copy().astype(dtype)
 
 
- def normalize_multichannel(multichannel_frame, percentiles=None,
- values=None, ignore_gray_value=0., clip=False,
- amplification=None, dtype=float):
+ def normalize_multichannel(multichannel_frame: np.ndarray,
+ percentiles=None,
+ values=None,
+ ignore_gray_value=0.,
+ clip=False,
+ amplification=None,
+ dtype=float,
+ ):
 
  """
  Normalizes a multichannel frame by adjusting the intensity values of each channel based on specified percentiles,
@@ -3234,13 +3312,11 @@ def normalize_multichannel(multichannel_frame, percentiles=None,
  Examples
  --------
  >>> multichannel_frame = np.random.rand(100, 100, 3) # Example multichannel frame
- >>> normalized_frame = normalize_multichannel(multichannel_frame, percentiles=((1, 99), (2, 98), (0, 100)))
+ >>> normalized_frame = normalize_multichannel(multichannel_frame, percentiles=[(1, 99), (2, 98), (0, 100)])
  # Normalizes each channel of the frame using specified percentile ranges.
 
  """
 
-
-
  mf = multichannel_frame.copy().astype(float)
  assert mf.ndim == 3, f'Wrong shape for the multichannel frame: {mf.shape}.'
  if percentiles is None:
@@ -3275,7 +3351,7 @@
 
  return np.moveaxis(mf_new,0,-1)
 
- def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=float, normalize_kwargs={"percentiles": (0.,99.99)}):
+ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=np.float64, normalize_kwargs={"percentiles": (0.,99.99)}):
 
  """
  Loads and optionally normalizes and rescales specified frames from a stack located at a given path.
@@ -3334,6 +3410,10 @@ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=fl
  print(
  f'Error in loading the frame {img_nums} {e}. Please check that the experiment channel information is consistent with the movie being read.')
  return None
+ try:
+ frames[np.isinf(frames)] = np.nan
+ except Exception as e:
+ print(e)
 
  frames = _rearrange_multichannel_frame(frames)
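Finally, a hedged sketch tying together load_frames (whose default dtype becomes np.float64 above, with infinite values now replaced by NaN) and normalize_multichannel, using only arguments visible in this diff; the stack path is a placeholder:

    # Hypothetical usage sketch based on the signatures shown above.
    import numpy as np
    from celldetective.io import load_frames, normalize_multichannel

    frames = load_frames([0, 1, 2], "/path/to/position/movie/Aligned_stack.tif",
                         normalize_input=True,
                         normalize_kwargs={"percentiles": (0., 99.99)})

    # Per-channel percentile normalization of an (H, W, C) frame, mirroring the
    # docstring example corrected in this diff.
    multichannel_frame = np.random.rand(100, 100, 3)
    normalized = normalize_multichannel(multichannel_frame, percentiles=[(1, 99), (2, 98), (0, 100)])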