celldetective 1.3.7.post1__py3-none-any.whl → 1.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. celldetective/_version.py +1 -1
  2. celldetective/gui/btrack_options.py +8 -8
  3. celldetective/gui/classifier_widget.py +8 -0
  4. celldetective/gui/configure_new_exp.py +1 -1
  5. celldetective/gui/json_readers.py +2 -4
  6. celldetective/gui/plot_signals_ui.py +38 -29
  7. celldetective/gui/process_block.py +1 -0
  8. celldetective/gui/processes/downloader.py +108 -0
  9. celldetective/gui/processes/measure_cells.py +346 -0
  10. celldetective/gui/processes/segment_cells.py +354 -0
  11. celldetective/gui/processes/track_cells.py +298 -0
  12. celldetective/gui/processes/train_segmentation_model.py +270 -0
  13. celldetective/gui/processes/train_signal_model.py +108 -0
  14. celldetective/gui/seg_model_loader.py +71 -25
  15. celldetective/gui/signal_annotator2.py +10 -7
  16. celldetective/gui/signal_annotator_options.py +1 -1
  17. celldetective/gui/tableUI.py +252 -20
  18. celldetective/gui/viewers.py +1 -1
  19. celldetective/io.py +53 -20
  20. celldetective/measure.py +12 -144
  21. celldetective/relative_measurements.py +40 -43
  22. celldetective/segmentation.py +48 -1
  23. celldetective/signals.py +84 -305
  24. celldetective/tracking.py +23 -24
  25. celldetective/utils.py +1 -1
  26. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/METADATA +11 -2
  27. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/RECORD +31 -25
  28. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/WHEEL +1 -1
  29. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/LICENSE +0 -0
  30. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/entry_points.txt +0 -0
  31. {celldetective-1.3.7.post1.dist-info → celldetective-1.3.8.dist-info}/top_level.txt +0 -0
celldetective/io.py CHANGED
@@ -16,7 +16,7 @@ import concurrent.futures
 from csbdeep.utils import normalize_mi_ma
 from csbdeep.io import save_tiff_imagej_compatible
 
-import skimage.io as skio
+import imageio.v2 as imageio
 from skimage.measure import regionprops_table, label
 
 from btrack.datasets import cell_config
@@ -58,6 +58,7 @@ def extract_experiment_from_well(well_path):
     >>> well_path = "/path/to/experiment/plate/well"
     >>> extract_experiment_from_well(well_path)
     '/path/to/experiment'
+
     """
 
     if not well_path.endswith(os.sep):
@@ -94,6 +95,7 @@ def extract_well_from_position(pos_path):
     >>> pos_path = "/path/to/experiment/plate/well/position"
     >>> extract_well_from_position(pos_path)
     '/path/to/experiment/plate/well/'
+
     """
 
     if not pos_path.endswith(os.sep):
@@ -129,6 +131,7 @@ def extract_experiment_from_position(pos_path):
     >>> pos_path = "/path/to/experiment/plate/well/position"
     >>> extract_experiment_from_position(pos_path)
     '/path/to/experiment'
+
     """
 
     pos_path = pos_path.replace(os.sep, '/')
@@ -187,6 +190,7 @@ def collect_experiment_metadata(pos_path=None, well_path=None):
     >>> metadata = collect_experiment_metadata(well_path=well_path)
     >>> metadata["concentration"]
     10.0
+
     """
 
     if pos_path is not None:
@@ -289,6 +293,7 @@ def get_config(experiment):
     >>> config_path = get_config(experiment)
     >>> print(config_path)
     '/path/to/experiment/config.ini'
+
     """
 
     if not experiment.endswith(os.sep):
@@ -336,6 +341,7 @@ def get_spatial_calibration(experiment):
     >>> calibration = get_spatial_calibration(experiment)
     >>> print(calibration)
     0.325 # pixels-to-micrometers conversion factor
+
     """
 
     config = get_config(experiment)
@@ -380,6 +386,7 @@ def get_temporal_calibration(experiment):
     >>> calibration = get_temporal_calibration(experiment)
     >>> print(calibration)
     0.5 # frames-to-minutes conversion factor
+
     """
 
     config = get_config(experiment)
@@ -393,6 +400,23 @@ def get_experiment_metadata(experiment):
     metadata = ConfigSectionMap(config, "Metadata")
     return metadata
 
+def get_experiment_labels(experiment):
+
+    config = get_config(experiment)
+    wells = get_experiment_wells(experiment)
+    nbr_of_wells = len(wells)
+
+    labels = ConfigSectionMap(config, "Labels")
+    for k in list(labels.keys()):
+        values = labels[k].split(',')
+        if nbr_of_wells != len(values):
+            values = [str(s) for s in np.linspace(0, nbr_of_wells - 1, nbr_of_wells)]
+        if np.all([s.isnumeric() for s in values]):
+            values = [float(s) for s in values]
+        labels.update({k: values})
+
+    return labels
+
 
 def get_experiment_concentrations(experiment, dtype=str):
 
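The new `get_experiment_labels` helper reads the config's `[Labels]` section, splits each comma-separated entry into one value per well, substitutes placeholder indices when the value count does not match the number of wells, and casts purely numeric entries to floats. A minimal sketch of the expected return value, assuming a hypothetical config that declares `concentration = 0,10,100` and `cell_type = TypeA,TypeB,TypeC` for three wells:

>>> get_experiment_labels("/path/to/experiment")
{'concentration': [0.0, 10.0, 100.0], 'cell_type': ['TypeA', 'TypeB', 'TypeC']}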
@@ -435,6 +459,7 @@ def get_experiment_concentrations(experiment, dtype=str):
     >>> concentrations = get_experiment_concentrations(experiment, dtype=float)
     >>> print(concentrations)
     [0.1, 0.2, 0.5, 1.0]
+
     """
 
     config = get_config(experiment)
@@ -489,6 +514,7 @@ def get_experiment_cell_types(experiment, dtype=str):
     >>> cell_types = get_experiment_cell_types(experiment, dtype=str)
     >>> print(cell_types)
     ['TypeA', 'TypeB', 'TypeC', 'TypeD']
+
     """
 
     config = get_config(experiment)
@@ -540,6 +566,7 @@ def get_experiment_antibodies(experiment, dtype=str):
 
     >>> get_experiment_antibodies("path/to/experiment2", dtype=int)
     array([0, 1, 2])
+
     """
 
     config = get_config(experiment)
@@ -594,6 +621,7 @@ def get_experiment_pharmaceutical_agents(experiment, dtype=str):
     >>> antibodies = get_experiment_antibodies(experiment, dtype=str)
     >>> print(antibodies)
     ['AntibodyA', 'AntibodyB', 'AntibodyC', 'AntibodyD']
+
     """
 
     config = get_config(experiment)
@@ -702,6 +730,7 @@ def extract_well_name_and_number(well):
     >>> well_path = "another/path/W1"
     >>> extract_well_name_and_number(well_path)
     ('W1', 1)
+
     """
 
     split_well_path = well.split(os.sep)
@@ -740,6 +769,7 @@ def extract_position_name(pos):
     >>> pos_path = "another/path/positionA"
     >>> extract_position_name(pos_path)
     'positionA'
+
     """
 
     split_pos_path = pos.split(os.sep)
@@ -890,6 +920,7 @@ def get_position_movie_path(pos, prefix=''):
     >>> pos_path = "nonexistent/path"
     >>> get_position_movie_path(pos_path)
     None
+
     """
 
 
@@ -961,16 +992,15 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
     Use pickle files for faster loading:
 
     >>> df = load_experiment_tables("experiment_01", load_pickle=True)
+
     """
 
     config = get_config(experiment)
     wells = get_experiment_wells(experiment)
 
     movie_prefix = ConfigSectionMap(config, "MovieSettings")["movie_prefix"]
-    concentrations = get_experiment_concentrations(experiment, dtype=float)
-    cell_types = get_experiment_cell_types(experiment)
-    antibodies = get_experiment_antibodies(experiment)
-    pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
+
+    labels = get_experiment_labels(experiment)
     metadata = get_experiment_metadata(experiment) # None or dict of metadata
     well_labels = _extract_labels_from_config(config, len(wells))
@@ -986,14 +1016,8 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
 
         well_name, well_number = extract_well_name_and_number(well_path)
         widx = well_indices[k]
-
         well_alias = well_labels[widx]
 
-        well_concentration = concentrations[widx]
-        well_antibody = antibodies[widx]
-        well_cell_type = cell_types[widx]
-        well_pharmaceutical_agent = pharmaceutical_agents[widx]
-
         positions = get_positions_in_well(well_path)
         if position_indices is not None:
             try:
@@ -1022,10 +1046,13 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
             df_pos['well_name'] = well_name
             df_pos['pos_name'] = pos_name
 
-            df_pos['concentration'] = well_concentration
-            df_pos['antibody'] = well_antibody
-            df_pos['cell_type'] = well_cell_type
-            df_pos['pharmaceutical_agent'] = well_pharmaceutical_agent
+            for k in list(labels.keys()):
+                values = labels[k]
+                try:
+                    df_pos[k] = values[widx]
+                except Exception as e:
+                    print(f"{e=}")
+
             if metadata is not None:
                 keys = list(metadata.keys())
                 for k in keys:
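With this change, `load_experiment_tables` attaches one column per `[Labels]` key instead of the four hard-coded `concentration`, `antibody`, `cell_type` and `pharmaceutical_agent` columns, so arbitrary well-level annotations propagate into the tables. An illustrative check under the same hypothetical config as above:

>>> df = load_experiment_tables("/path/to/experiment")
>>> 'concentration' in df.columns  # one column per key declared under [Labels]
True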
@@ -1037,10 +1064,6 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
             pos_dict = {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
                         'stack_path': stack_path,'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
                         'well_number': well_number, 'well_alias': well_alias}
-            # if metadata is not None:
-            #     keys = list(metadata.keys())
-            #     for k in keys:
-            #         pos_dict.update({k: metadata[k]})
 
             df_pos_info.append(pos_dict)
 
@@ -1171,6 +1194,7 @@ def locate_labels(position, population='target', frames=None):
     Load multiple specific frames:
 
     >>> labels = locate_labels("/path/to/position", population="target", frames=[0, 1, 2])
+
     """
 
     if not position.endswith(os.sep):
@@ -1241,6 +1265,7 @@ def fix_missing_labels(position, population='target', prefix='Aligned'):
     -------
     None
         The function creates new label files in the corresponding folder for any frames missing label files.
+
     """
 
     if not position.endswith(os.sep):
@@ -1414,6 +1439,7 @@ def auto_load_number_of_frames(stack_path):
     >>> frames = auto_load_number_of_frames(None)
     >>> print(frames)
     None
+
     """
 
     if stack_path is None:
@@ -1510,6 +1536,7 @@ def parse_isotropic_radii(string):
     - It identifies ranges using square brackets and assumes that ranges are always
       two consecutive values.
     - Non-integer sections of the string are ignored.
+
     """
 
     sections = re.split(',| ', string)
@@ -1618,6 +1645,7 @@ def interpret_tracking_configuration(config):
 
     >>> interpret_tracking_configuration(None)
     '/path/to/default/config.json'
+
     """
 
     if isinstance(config, str):
@@ -1792,6 +1820,7 @@ def locate_signal_model(name, path=None, pairs=False):
 
     >>> locate_signal_model("remote_model")
     'path/to/celldetective/models/signal_detection/remote_model/'
+
     """
 
     main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective"])
@@ -1859,6 +1888,7 @@ def locate_pair_signal_model(name, path=None):
 
     >>> locate_pair_signal_model("custom_model", path="/additional/models/")
     '/additional/models/custom_model/'
+
     """
 
 
@@ -1937,6 +1967,7 @@ def relabel_segmentation(labels, df, exclude_nans=True, column_labels={'track':
     ... }
     >>> new_labels = relabel_segmentation(labels, df, column_labels=column_labels, exclude_nans=True)
     Done.
+
     """
 
     n_threads = threads
@@ -2037,6 +2068,7 @@ def control_tracks(position, prefix="Aligned", population="target", relabel=True
     Example
     -------
     >>> control_tracks("/path/to/data/position_1", prefix="Aligned", population="target", relabel=True, flush_memory=True, threads=4)
+
     """
 
     if not position.endswith(os.sep):
@@ -2089,6 +2121,7 @@ def tracks_to_btrack(df, exclude_nans=False):
     Example
     -------
     >>> data, properties, graph = tracks_to_btrack(df, exclude_nans=True)
+
     """
 
     graph = {}
@@ -3310,7 +3343,7 @@ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=fl
     """
 
     try:
-        frames = skio.imread(stack_path, key=img_nums, plugin="tifffile")
+        frames = imageio.imread(stack_path, key=img_nums)
     except Exception as e:
         print(
             f'Error in loading the frame {img_nums} {e}. Please check that the experiment channel information is consistent with the movie being read.')
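Both TIFF read sites now go through `imageio.v2` rather than `skimage.io`, with the `key` argument still forwarded to the tifffile backend so that only the requested pages are loaded. A minimal sketch with a hypothetical stack path:

>>> import imageio.v2 as imageio
>>> frames = imageio.imread("/path/to/Aligned_stack.tif", key=[0, 1, 2])  # load pages 0-2 only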
celldetective/measure.py CHANGED
@@ -86,14 +86,14 @@ def measure(stack=None, labels=None, trajectories=None, channel_names=None,
     >>> stack = np.random.rand(10, 100, 100, 3)
     >>> labels = np.random.randint(0, 2, (10, 100, 100))
     >>> trajectories = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
-    ...                              'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
+    ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
     >>> channel_names = ['channel1', 'channel2', 'channel3']
     >>> features = ['area', 'intensity_mean']
     >>> intensity_measurement_radii = [5, 10]
     >>> border_distances = 2
     >>> measurements = measure(stack=stack, labels=labels, trajectories=trajectories, channel_names=channel_names,
-    ...                        features=features, intensity_measurement_radii=intensity_measurement_radii,
-    ...                        border_distances=border_distances)
+    ... features=features, intensity_measurement_radii=intensity_measurement_radii,
+    ... border_distances=border_distances)
     # Perform measurements on the stack, labels, and trajectories, computing isotropic intensities and additional features.
 
     """
@@ -661,12 +661,12 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t
     Examples
     --------
     >>> positions = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
-    ...                           'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
+    ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
     >>> img = np.random.rand(100, 100, 3)
     >>> channels = ['channel1', 'channel2', 'channel3']
     >>> intensity_measurement_radii = 5
     >>> positions = measure_isotropic_intensity(positions, img, channels=channels,
-    ...                                         intensity_measurement_radii=intensity_measurement_radii)
+    ... intensity_measurement_radii=intensity_measurement_radii)
     # Measure isotropic intensity values around cell positions in the image.
 
     """
@@ -813,45 +813,7 @@ def measure_at_position(pos, mode, return_measurements=False, threads=1):
 
 
 def local_normalisation(image, labels, background_intensity, measurement='intensity_median', operation='subtract', clip=False):
-    """
-    Perform local normalization on an image based on labels.
-
-    Parameters:
-    - image (numpy.ndarray): The input image.
-    - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
-    - background_intensity (pandas.DataFrame): A DataFrame containing background intensity values
-      corresponding to each label.
-    - mode (str): The normalization mode ('Mean' or 'Median').
-    - operation (str): The operation to perform ('Subtract' or 'Divide').
-
-    Returns:
-    - numpy.ndarray: The normalized image.
-
-    This function performs local normalization on an image based on the provided labels. It iterates over
-    each unique label, excluding the background label (0), and performs the specified operation with the
-    background intensity values corresponding to that label. The background intensity values are obtained
-    from the provided background_intensity DataFrame based on the normalization mode.
-
-    If the operation is 'Subtract', the background intensity is subtracted from the image pixel values.
-    If the operation is 'Divide', the image pixel values are divided by the background intensity.
-
-    Example:
-    >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-    >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
-    >>> background_intensity = pd.DataFrame({'intensity_mean': [10, 20, 30]})
-    >>> mode = 'Mean'
-    >>> operation = 'Subtract'
-    >>> result = local_normalisation(image, labels, background_intensity, mode, operation)
-    >>> print(result)
-    [[-9. -8. -7.]
-     [14. 15. 6.]
-     [27. 28. 9.]]
-
-    Note:
-    - The background intensity DataFrame should have columns named 'intensity_mean' or 'intensity_median'
-      based on the mode specified.
-    - The background intensity values should be provided in the same order as the labels.
-    """
+
 
     for index, cell in enumerate(np.unique(labels)):
         if cell == 0:
869
831
 
870
832
 
871
833
  def normalise_by_cell(image, labels, distance=5, model='median', operation='subtract', clip=False):
872
- """
873
- Normalize an image based on cell regions.
874
-
875
- Parameters:
876
- - image (numpy.ndarray): The input image.
877
- - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
878
- - distance (float): The distance parameter for finding the contour of cell regions.
879
- - mode (str): The normalization mode ('Mean' or 'Median').
880
- - operation (str): The operation to perform ('Subtract' or 'Divide').
881
-
882
- Returns:
883
- - numpy.ndarray: The normalized image.
884
-
885
- This function normalizes an image based on cell regions defined by the provided labels. It calculates
886
- the border of cell regions using the contour_of_instance_segmentation function with the specified
887
- distance parameter. Then, it computes the background intensity of each cell region based on the mode
888
- ('Mean' or 'Median'). Finally, it performs local normalization using the local_normalisation function
889
- and returns the normalized image.
890
-
891
- Example:
892
- >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
893
- >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
894
- >>> distance = 2.0
895
- >>> mode = 'Mean'
896
- >>> operation = 'Subtract'
897
- >>> result = normalise_by_cell(image, labels, distance, mode, operation)
898
- >>> print(result)
899
- [[-9. -8. -7.]
900
- [14. 15. 6.]
901
- [27. 28. 9.]]
902
-
903
- Note:
904
- - The contour of cell regions is calculated using the contour_of_instance_segmentation function.
905
- - The background intensity is computed based on the specified mode ('Mean' or 'Median').
906
- - The operation determines whether to subtract or divide the background intensity from the image.
907
- """
834
+
835
+
908
836
  border = contour_of_instance_segmentation(label=labels, distance=distance * (-1))
909
837
  if model == 'mean':
910
838
  measurement = 'intensity_nanmean'
@@ -987,70 +915,6 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ
     return detections
 
 
-# def blob_detectionv0(image, label, threshold, diameter):
-#     """
-#     Perform blob detection on an image based on labeled regions.
-
-#     Parameters:
-#     - image (numpy.ndarray): The input image data.
-#     - label (numpy.ndarray): An array specifying labeled regions in the image.
-#     - threshold (float): The threshold value for blob detection.
-#     - diameter (float): The expected diameter of blobs.
-
-#     Returns:
-#     - dict: A dictionary containing information about detected blobs.
-
-#     This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
-#     and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
-#     based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
-#     detected blobs and their mean intensity for each labeled region.
-
-#     Example:
-#     >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-#     >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
-#     >>> threshold = 0.1
-#     >>> diameter = 5.0
-#     >>> result = blob_detection(image, label, threshold, diameter)
-#     >>> print(result)
-#     {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
-
-#     Note:
-#     - Blobs are detected using the Difference of Gaussians (DoG) method.
-#     - Detected blobs are filtered based on the specified threshold and expected diameter.
-#     - The returned dictionary contains information about the number of detected blobs and their mean intensity
-#       for each labeled region.
-#     """
-#     blob_labels = {}
-#     dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
-#     for mask_index in np.unique(label):
-#         if mask_index == 0:
-#             continue
-#         removed_background = image.copy()
-#         one_mask = label.copy()
-#         one_mask[np.where(label != mask_index)] = 0
-#         dilated_copy = dilated_image.copy()
-#         dilated_copy[np.where(dilated_image != mask_index)] = 0
-#         removed_background[np.where(dilated_copy == 0)] = 0
-#         min_sigma = (1 / (1 + math.sqrt(2))) * diameter
-#         max_sigma = math.sqrt(2) * min_sigma
-#         blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
-#                          max_sigma=max_sigma)
-
-#         mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
-#         if not np.any(mask):
-#             continue
-#         blobs_filtered = blobs[mask]
-#         binary_blobs = np.zeros_like(label)
-#         for blob in blobs_filtered:
-#             y, x, r = blob
-#             rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
-#             binary_blobs[rr, cc] = 1
-#         spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
-#         blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
-#     return blob_labels
-
-### Classification ####
-
 def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):
 
     """
@@ -1178,6 +1042,7 @@ def interpret_track_classification(df, class_attr, irreversible_event=False, uni
     Example
     -------
     >>> df = interpret_track_classification(df, 'class', irreversible_event=True, r2_threshold=0.7)
+
     """
 
     cols = list(df.columns)
@@ -1325,6 +1190,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
     Example
     -------
     >>> df = classify_irreversible_events(df, 'class', r2_threshold=0.7)
+
     """
 
     df = data.copy()
@@ -1426,6 +1292,7 @@ def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
     Example
     -------
     >>> df = classify_unique_states(df, 'class', percentile=75)
+
     """
 
     cols = list(df.columns)
@@ -1521,6 +1388,7 @@ def classify_cells_from_query(df, status_attr, query):
     ------
     Exception
         If the query is invalid or if there are issues with the DataFrame or query syntax, an error message is printed, and `None` is returned.
+
     """
 
celldetective/relative_measurements.py CHANGED
@@ -129,14 +129,7 @@ def measure_pairs(pos, neighborhood_protocol):
 
 
 def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs={'window': 3, 'mode': 'bi'}):
-    """
-    pos: position to process
-    target_classes [list]: target classes to keep
-    neigh_dist: neighborhood cut distance
-    theta_dist: distance to edge threshold
-    velocity_kwargs: params for derivative of relative position
-    neighborhood_kwargs: params for neigh
-    """
+
 
     reference_population = neighborhood_protocol['reference']
     neighbor_population = neighborhood_protocol['neighbor']
@@ -422,12 +415,13 @@ def timeline_matching(timeline1, timeline2):
     -------
     tuple
         A tuple containing:
-        - full_timeline : numpy.ndarray
-          The unified timeline spanning from the minimum to the maximum time point in the input timelines.
-        - index1 : list of int
-          The indices of `timeline1` in the `full_timeline`.
-        - index2 : list of int
-          The indices of `timeline2` in the `full_timeline`.
+
+        - full_timeline : numpy.ndarray
+          The unified timeline spanning from the minimum to the maximum time point in the input timelines.
+        - index1 : list of int
+          The indices of `timeline1` in the `full_timeline`.
+        - index2 : list of int
+          The indices of `timeline2` in the `full_timeline`.
 
     Examples
     --------
@@ -446,6 +440,7 @@ def timeline_matching(timeline1, timeline2):
     - The function combines the two timelines and generates a continuous range from the minimum to the maximum time point.
     - It then finds the indices of the original timelines in this unified timeline.
     - The function assumes that the input timelines consist of integer values.
+
     """
 
     min_t = np.amin(np.concatenate((timeline1, timeline2)))
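An illustrative call, assuming integer timelines and the inclusive unified range described in the notes above (output shown schematically):

>>> timeline_matching(np.array([0, 1, 2]), np.array([2, 3, 4]))
(array([0, 1, 2, 3, 4]), [0, 1, 2], [2, 3, 4])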
@@ -550,16 +545,17 @@ def extract_neighborhoods_from_pickles(pos):
     -------
     list of dict
         A list of dictionaries, each containing a neighborhood protocol. Each dictionary has the keys:
-        - 'reference' : str
-          The reference population ('targets' or 'effectors').
-        - 'neighbor' : str
-          The neighbor population.
-        - 'type' : str
-          The type of neighborhood ('circle' or 'contact').
-        - 'distance' : float
-          The distance parameter for the neighborhood.
-        - 'description' : str
-          The original neighborhood string.
+
+        - 'reference' : str
+          The reference population ('targets' or 'effectors').
+        - 'neighbor' : str
+          The neighbor population.
+        - 'type' : str
+          The type of neighborhood ('circle' or 'contact').
+        - 'distance' : float
+          The distance parameter for the neighborhood.
+        - 'description' : str
+          The original neighborhood string.
 
     Notes
     -----
@@ -572,8 +568,9 @@ def extract_neighborhoods_from_pickles(pos):
     --------
     >>> protocols = extract_neighborhoods_from_pickles('/path/to/data')
     >>> for protocol in protocols:
-    >>>     print(protocol)
+    >>> print(protocol)
     {'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
+
     """
 
     tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
@@ -618,16 +615,17 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
     -------
     dict
         A dictionary containing the neighborhood protocol with keys:
-        - 'reference' : str
-          The reference population.
-        - 'neighbor' : str
-          The neighbor population.
-        - 'type' : str
-          The type of neighborhood ('circle' or 'contact').
-        - 'distance' : float
-          The distance parameter for the neighborhood.
-        - 'description' : str
-          The original neighborhood string.
+
+        - 'reference' : str
+          The reference population.
+        - 'neighbor' : str
+          The neighbor population.
+        - 'type' : str
+          The type of neighborhood ('circle' or 'contact').
+        - 'distance' : float
+          The distance parameter for the neighborhood.
+        - 'description' : str
+          The original neighborhood string.
 
     Raises
     ------
@@ -644,6 +642,7 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
     --------
     >>> extract_neighborhood_settings('neighborhood_self_contact_5_px', 'targets')
     {'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
+
     """
 
     assert neigh_string.startswith('neighborhood')
@@ -684,23 +683,20 @@ def expand_pair_table(data):
     Parameters
     ----------
     data : pandas.DataFrame
-        DataFrame containing the pair table, which should include the columns:
-        - 'reference_population': The reference population type.
-        - 'neighbor_population': The neighbor population type.
-        - 'position': The position identifier for each pair.
+        DataFrame containing the pair table
 
     Returns
     -------
     pandas.DataFrame
         Expanded DataFrame that includes merged reference and neighbor data, sorted by position, reference population,
-        neighbor population, and frame. Rows without values in 'REFERENCE_ID', 'NEIGHBOR_ID', 'reference_population',
-        or 'neighbor_population' are dropped.
+        neighbor population, and frame. Rows without values in `REFERENCE_ID`, `NEIGHBOR_ID`, `reference_population`,
+        or `neighbor_population` are dropped.
 
     Notes
     -----
     - For each unique pair of `reference_population` and `neighbor_population`, the function identifies corresponding
       trajectories CSV files based on the position identifier.
-    - The function reads the trajectories CSV files, prefixes columns with 'reference_' or 'neighbor_' to avoid
+    - The function reads the trajectories CSV files, prefixes columns with `reference_` or `neighbor_` to avoid
       conflicts, and merges data from reference and neighbor tables based on `TRACK_ID` or `ID`, and `FRAME`.
     - Merges are performed in an outer join manner to retain all rows, regardless of missing values in the target files.
     - The final DataFrame is sorted and cleaned to ensure only valid pairings are included.
@@ -713,7 +709,8 @@ def expand_pair_table(data):
     Raises
     ------
     AssertionError
-        If 'reference_population' or 'neighbor_population' is not found in the columns of `data`.
+        If `reference_population` or `neighbor_population` is not found in the columns of `data`.
+
     """
 
     assert 'reference_population' in list(data.columns),"Please provide a valid pair table..."
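A hedged sketch of the expected input shape; since `expand_pair_table` reads per-position trajectory CSVs from disk, the position path below is hypothetical and the call only succeeds inside a real experiment folder:

>>> pairs = pd.DataFrame({'reference_population': ['targets'], 'neighbor_population': ['effectors'],
...                       'position': ['/path/to/position/'], 'REFERENCE_ID': [0], 'NEIGHBOR_ID': [1], 'FRAME': [0]})
>>> expanded = expand_pair_table(pairs)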