celldetective 1.3.7__py3-none-any.whl → 1.3.7.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
celldetective/measure.py CHANGED
@@ -86,14 +86,14 @@ def measure(stack=None, labels=None, trajectories=None, channel_names=None,
  >>> stack = np.random.rand(10, 100, 100, 3)
  >>> labels = np.random.randint(0, 2, (10, 100, 100))
  >>> trajectories = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
- ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
+ ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
  >>> channel_names = ['channel1', 'channel2', 'channel3']
  >>> features = ['area', 'intensity_mean']
  >>> intensity_measurement_radii = [5, 10]
  >>> border_distances = 2
  >>> measurements = measure(stack=stack, labels=labels, trajectories=trajectories, channel_names=channel_names,
- ... features=features, intensity_measurement_radii=intensity_measurement_radii,
- ... border_distances=border_distances)
+ ... features=features, intensity_measurement_radii=intensity_measurement_radii,
+ ... border_distances=border_distances)
  # Perform measurements on the stack, labels, and trajectories, computing isotropic intensities and additional features.

  """
@@ -661,12 +661,12 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t
  Examples
  --------
  >>> positions = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
- ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
+ ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
  >>> img = np.random.rand(100, 100, 3)
  >>> channels = ['channel1', 'channel2', 'channel3']
  >>> intensity_measurement_radii = 5
  >>> positions = measure_isotropic_intensity(positions, img, channels=channels,
- ... intensity_measurement_radii=intensity_measurement_radii)
+ ... intensity_measurement_radii=intensity_measurement_radii)
  # Measure isotropic intensity values around cell positions in the image.

  """
@@ -813,45 +813,7 @@ def measure_at_position(pos, mode, return_measurements=False, threads=1):


  def local_normalisation(image, labels, background_intensity, measurement='intensity_median', operation='subtract', clip=False):
- """
- Perform local normalization on an image based on labels.
-
- Parameters:
- - image (numpy.ndarray): The input image.
- - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
- - background_intensity (pandas.DataFrame): A DataFrame containing background intensity values
- corresponding to each label.
- - mode (str): The normalization mode ('Mean' or 'Median').
- - operation (str): The operation to perform ('Subtract' or 'Divide').
-
- Returns:
- - numpy.ndarray: The normalized image.
-
- This function performs local normalization on an image based on the provided labels. It iterates over
- each unique label, excluding the background label (0), and performs the specified operation with the
- background intensity values corresponding to that label. The background intensity values are obtained
- from the provided background_intensity DataFrame based on the normalization mode.
-
- If the operation is 'Subtract', the background intensity is subtracted from the image pixel values.
- If the operation is 'Divide', the image pixel values are divided by the background intensity.
-
- Example:
- >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
- >>> background_intensity = pd.DataFrame({'intensity_mean': [10, 20, 30]})
- >>> mode = 'Mean'
- >>> operation = 'Subtract'
- >>> result = local_normalisation(image, labels, background_intensity, mode, operation)
- >>> print(result)
- [[-9. -8. -7.]
- [14. 15. 6.]
- [27. 28. 9.]]
-
- Note:
- - The background intensity DataFrame should have columns named 'intensity_mean' or 'intensity_median'
- based on the mode specified.
- - The background intensity values should be provided in the same order as the labels.
- """
+

  for index, cell in enumerate(np.unique(labels)):
  if cell == 0:
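The hunk above strips the docstring of `local_normalisation` without changing its behaviour. For readers of this diff, here is a minimal sketch of the logic the removed docstring described (per-cell background subtraction or division); the name `local_normalisation_sketch` and the row-alignment assumption are ours, not the library's:

```python
import numpy as np
import pandas as pd

def local_normalisation_sketch(image, labels, background_intensity,
                               measurement='intensity_median',
                               operation='subtract', clip=False):
    # Assumes rows of background_intensity align with the non-zero labels
    # in sorted order, as the removed docstring required.
    out = image.astype(float).copy()
    nonzero_labels = [c for c in np.unique(labels) if c != 0]
    for row, cell in enumerate(nonzero_labels):
        bg = background_intensity[measurement].values[row]
        mask = labels == cell
        if operation == 'subtract':
            out[mask] -= bg
        else:  # 'divide'
            out[mask] /= bg
    if clip:
        out[out < 0.] = 0.  # optionally clamp negative values after subtraction
    return out

image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
background = pd.DataFrame({'intensity_median': [10., 20., 30.]})
print(local_normalisation_sketch(image, labels, background))
```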
@@ -869,42 +831,8 @@ def local_normalisation(image, labels, background_intensity, measurement='intens


  def normalise_by_cell(image, labels, distance=5, model='median', operation='subtract', clip=False):
- """
- Normalize an image based on cell regions.
-
- Parameters:
- - image (numpy.ndarray): The input image.
- - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
- - distance (float): The distance parameter for finding the contour of cell regions.
- - mode (str): The normalization mode ('Mean' or 'Median').
- - operation (str): The operation to perform ('Subtract' or 'Divide').
-
- Returns:
- - numpy.ndarray: The normalized image.
-
- This function normalizes an image based on cell regions defined by the provided labels. It calculates
- the border of cell regions using the contour_of_instance_segmentation function with the specified
- distance parameter. Then, it computes the background intensity of each cell region based on the mode
- ('Mean' or 'Median'). Finally, it performs local normalization using the local_normalisation function
- and returns the normalized image.
-
- Example:
- >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
- >>> distance = 2.0
- >>> mode = 'Mean'
- >>> operation = 'Subtract'
- >>> result = normalise_by_cell(image, labels, distance, mode, operation)
- >>> print(result)
- [[-9. -8. -7.]
- [14. 15. 6.]
- [27. 28. 9.]]
-
- Note:
- - The contour of cell regions is calculated using the contour_of_instance_segmentation function.
- - The background intensity is computed based on the specified mode ('Mean' or 'Median').
- - The operation determines whether to subtract or divide the background intensity from the image.
- """
+
+
  border = contour_of_instance_segmentation(label=labels, distance=distance * (-1))
  if model == 'mean':
  measurement = 'intensity_nanmean'
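Similarly, `normalise_by_cell` loses its docstring here while the surviving lines still show the flow: build an inner contour of each cell mask, estimate the background intensity on it, then normalise via `local_normalisation`. A hedged usage sketch against the new signature, with toy data and an assumed import path of `celldetective.measure` (the file this diff covers):

```python
import numpy as np
from celldetective.measure import normalise_by_cell

image = np.random.rand(100, 100)
labels = np.zeros((100, 100), dtype=int)
labels[20:40, 20:40] = 1  # a single toy cell mask

# Subtract the median intensity measured on a contour eroded 5 px into each
# cell; model='mean' with operation='divide' would rescale instead.
normalised = normalise_by_cell(image, labels, distance=5,
                               model='median', operation='subtract', clip=False)
```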
@@ -987,70 +915,6 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ
  return detections


- # def blob_detectionv0(image, label, threshold, diameter):
- # """
- # Perform blob detection on an image based on labeled regions.
-
- # Parameters:
- # - image (numpy.ndarray): The input image data.
- # - label (numpy.ndarray): An array specifying labeled regions in the image.
- # - threshold (float): The threshold value for blob detection.
- # - diameter (float): The expected diameter of blobs.
-
- # Returns:
- # - dict: A dictionary containing information about detected blobs.
-
- # This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
- # and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
- # based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
- # detected blobs and their mean intensity for each labeled region.
-
- # Example:
- # >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- # >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
- # >>> threshold = 0.1
- # >>> diameter = 5.0
- # >>> result = blob_detection(image, label, threshold, diameter)
- # >>> print(result)
- # {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
-
- # Note:
- # - Blobs are detected using the Difference of Gaussians (DoG) method.
- # - Detected blobs are filtered based on the specified threshold and expected diameter.
- # - The returned dictionary contains information about the number of detected blobs and their mean intensity
- # for each labeled region.
- # """
- # blob_labels = {}
- # dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
- # for mask_index in np.unique(label):
- # if mask_index == 0:
- # continue
- # removed_background = image.copy()
- # one_mask = label.copy()
- # one_mask[np.where(label != mask_index)] = 0
- # dilated_copy = dilated_image.copy()
- # dilated_copy[np.where(dilated_image != mask_index)] = 0
- # removed_background[np.where(dilated_copy == 0)] = 0
- # min_sigma = (1 / (1 + math.sqrt(2))) * diameter
- # max_sigma = math.sqrt(2) * min_sigma
- # blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
- # max_sigma=max_sigma)
-
- # mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
- # if not np.any(mask):
- # continue
- # blobs_filtered = blobs[mask]
- # binary_blobs = np.zeros_like(label)
- # for blob in blobs_filtered:
- # y, x, r = blob
- # rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
- # binary_blobs[rr, cc] = 1
- # spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
- # blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
- # return blob_labels
-
- ### Classification ####
-
  def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):

  """
@@ -1178,6 +1042,7 @@ def interpret_track_classification(df, class_attr, irreversible_event=False, uni
  Example
  -------
  >>> df = interpret_track_classification(df, 'class', irreversible_event=True, r2_threshold=0.7)
+
  """

  cols = list(df.columns)
@@ -1325,6 +1190,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
  Example
  -------
  >>> df = classify_irreversible_events(df, 'class', r2_threshold=0.7)
+
  """

  df = data.copy()
@@ -1426,6 +1292,7 @@ def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
  Example
  -------
  >>> df = classify_unique_states(df, 'class', percentile=75)
+
  """

  cols = list(df.columns)
@@ -1521,6 +1388,7 @@ def classify_cells_from_query(df, status_attr, query):
  ------
  Exception
  If the query is invalid or if there are issues with the DataFrame or query syntax, an error message is printed, and `None` is returned.
+
  """

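The documented contract of `classify_cells_from_query` (fill a status column from a pandas-style query; print an error and return `None` if the query is invalid) can be illustrated with a hypothetical stand-in. `classify_from_query_sketch` below is ours, not the library code:

```python
import pandas as pd

def classify_from_query_sketch(df, status_attr, query):
    # Hypothetical stand-in: rows matching the query get status 1, others 0.
    try:
        selection = df.eval(query)
    except Exception as e:
        print(f"Invalid query '{query}': {e}")
        return None
    df[status_attr] = selection.astype(int)
    return df

cells = pd.DataFrame({'area': [50, 120, 300]})
cells = classify_from_query_sketch(cells, 'status_large', 'area > 100')
# cells['status_large'] -> [0, 1, 1]
```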
@@ -129,14 +129,7 @@ def measure_pairs(pos, neighborhood_protocol):


  def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs={'window': 3, 'mode': 'bi'}):
- """
- pos: position to process
- target_classes [list]: target classes to keep
- neigh_dist: neighborhood cut distance
- theta_dist: distance to edge threshold
- velocity_kwargs: params for derivative of relative position
- neighborhood_kwargs: params for neigh
- """
+

  reference_population = neighborhood_protocol['reference']
  neighbor_population = neighborhood_protocol['neighbor']
@@ -422,12 +415,13 @@ def timeline_matching(timeline1, timeline2):
  -------
  tuple
  A tuple containing:
- - full_timeline : numpy.ndarray
- The unified timeline spanning from the minimum to the maximum time point in the input timelines.
- - index1 : list of int
- The indices of `timeline1` in the `full_timeline`.
- - index2 : list of int
- The indices of `timeline2` in the `full_timeline`.
+
+ - full_timeline : numpy.ndarray
+ The unified timeline spanning from the minimum to the maximum time point in the input timelines.
+ - index1 : list of int
+ The indices of `timeline1` in the `full_timeline`.
+ - index2 : list of int
+ The indices of `timeline2` in the `full_timeline`.

  Examples
  --------
@@ -446,6 +440,7 @@ def timeline_matching(timeline1, timeline2):
  - The function combines the two timelines and generates a continuous range from the minimum to the maximum time point.
  - It then finds the indices of the original timelines in this unified timeline.
  - The function assumes that the input timelines consist of integer values.
+
  """

  min_t = np.amin(np.concatenate((timeline1, timeline2)))
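The notes above fully determine the algorithm: build one integer range covering both timelines, then locate each original time point in it. A self-contained sketch, consistent with the surviving first line of the function body:

```python
import numpy as np

timeline1 = np.array([2, 3, 5])
timeline2 = np.array([1, 3, 4])

min_t = np.amin(np.concatenate((timeline1, timeline2)))
max_t = np.amax(np.concatenate((timeline1, timeline2)))
full_timeline = np.arange(min_t, max_t + 1)  # unified integer timeline

# Indices of each original timeline inside the unified one.
index1 = [int(np.where(full_timeline == t)[0][0]) for t in timeline1]
index2 = [int(np.where(full_timeline == t)[0][0]) for t in timeline2]
# full_timeline -> [1 2 3 4 5], index1 -> [1, 2, 4], index2 -> [0, 2, 3]
```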
@@ -550,16 +545,17 @@ def extract_neighborhoods_from_pickles(pos):
  -------
  list of dict
  A list of dictionaries, each containing a neighborhood protocol. Each dictionary has the keys:
- - 'reference' : str
- The reference population ('targets' or 'effectors').
- - 'neighbor' : str
- The neighbor population.
- - 'type' : str
- The type of neighborhood ('circle' or 'contact').
- - 'distance' : float
- The distance parameter for the neighborhood.
- - 'description' : str
- The original neighborhood string.
+
+ - 'reference' : str
+ The reference population ('targets' or 'effectors').
+ - 'neighbor' : str
+ The neighbor population.
+ - 'type' : str
+ The type of neighborhood ('circle' or 'contact').
+ - 'distance' : float
+ The distance parameter for the neighborhood.
+ - 'description' : str
+ The original neighborhood string.

  Notes
  -----
@@ -572,8 +568,9 @@ def extract_neighborhoods_from_pickles(pos):
  --------
  >>> protocols = extract_neighborhoods_from_pickles('/path/to/data')
  >>> for protocol in protocols:
- >>> print(protocol)
+ >>> print(protocol)
  {'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
+
  """

  tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
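The surviving `tab_tc` line shows where the pickled trajectory tables live, and the example output plus the `extract_neighborhood_settings` hunks below indicate that protocols are recovered from column names prefixed `neighborhood`. A hedged sketch of that scan, with a hypothetical position folder:

```python
import os
import pandas as pd

pos = '/path/to/position/'  # hypothetical; a position folder ending in a separator
tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])

neigh_cols = []
if os.path.exists(tab_tc):
    df = pd.read_pickle(tab_tc)
    # Neighborhood measurements are stored as columns named 'neighborhood_*';
    # each such name can be parsed into a protocol dict.
    neigh_cols = [c for c in df.columns if c.startswith('neighborhood')]
```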
@@ -618,16 +615,17 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
  -------
  dict
  A dictionary containing the neighborhood protocol with keys:
- - 'reference' : str
- The reference population.
- - 'neighbor' : str
- The neighbor population.
- - 'type' : str
- The type of neighborhood ('circle' or 'contact').
- - 'distance' : float
- The distance parameter for the neighborhood.
- - 'description' : str
- The original neighborhood string.
+
+ - 'reference' : str
+ The reference population.
+ - 'neighbor' : str
+ The neighbor population.
+ - 'type' : str
+ The type of neighborhood ('circle' or 'contact').
+ - 'distance' : float
+ The distance parameter for the neighborhood.
+ - 'description' : str
+ The original neighborhood string.

  Raises
  ------
  ------
@@ -644,6 +642,7 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
644
642
  --------
645
643
  >>> extract_neighborhood_settings('neighborhood_self_contact_5_px', 'targets')
646
644
  {'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
645
+
647
646
  """
648
647
 
649
648
  assert neigh_string.startswith('neighborhood')
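From the documented example, the function maps a string such as 'neighborhood_self_contact_5_px' onto a protocol dict. A hedged re-implementation for illustration only; the handling of non-`self` strings is our guess, flagged in the comments:

```python
def parse_neighborhood_sketch(neigh_string, population='targets'):
    # Illustrative only; mirrors the documented output for the 'self' case.
    assert neigh_string.startswith('neighborhood')
    parts = neigh_string.split('_')  # e.g. ['neighborhood', 'self', 'contact', '5', 'px']
    # 'self' means the neighbor population equals the reference population;
    # the fallback for non-self strings is an assumption, not library behaviour.
    neighbor = population if 'self' in parts else 'unknown'
    neigh_type = 'contact' if 'contact' in parts else 'circle'
    distance = float(parts[-2])  # numeric token preceding the 'px' suffix
    return {'reference': population, 'neighbor': neighbor, 'type': neigh_type,
            'distance': distance, 'description': neigh_string}

parse_neighborhood_sketch('neighborhood_self_contact_5_px', 'targets')
# {'reference': 'targets', 'neighbor': 'targets', 'type': 'contact',
#  'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
```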
@@ -684,23 +683,20 @@ def expand_pair_table(data):
  Parameters
  ----------
  data : pandas.DataFrame
- DataFrame containing the pair table, which should include the columns:
- - 'reference_population': The reference population type.
- - 'neighbor_population': The neighbor population type.
- - 'position': The position identifier for each pair.
+ DataFrame containing the pair table

  Returns
  -------
  pandas.DataFrame
  Expanded DataFrame that includes merged reference and neighbor data, sorted by position, reference population,
- neighbor population, and frame. Rows without values in 'REFERENCE_ID', 'NEIGHBOR_ID', 'reference_population',
- or 'neighbor_population' are dropped.
+ neighbor population, and frame. Rows without values in `REFERENCE_ID`, `NEIGHBOR_ID`, `reference_population`,
+ or `neighbor_population` are dropped.

  Notes
  -----
  - For each unique pair of `reference_population` and `neighbor_population`, the function identifies corresponding
  trajectories CSV files based on the position identifier.
- - The function reads the trajectories CSV files, prefixes columns with 'reference_' or 'neighbor_' to avoid
+ - The function reads the trajectories CSV files, prefixes columns with `reference_` or `neighbor_` to avoid
  conflicts, and merges data from reference and neighbor tables based on `TRACK_ID` or `ID`, and `FRAME`.
  - Merges are performed in an outer join manner to retain all rows, regardless of missing values in the target files.
  - The final DataFrame is sorted and cleaned to ensure only valid pairings are included.
@@ -713,7 +709,8 @@ def expand_pair_table(data):
  Raises
  ------
  AssertionError
- If 'reference_population' or 'neighbor_population' is not found in the columns of `data`.
+ If `reference_population` or `neighbor_population` is not found in the columns of `data`.
+
  """

  assert 'reference_population' in list(data.columns),"Please provide a valid pair table..."
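The notes in the two hunks above describe the merge strategy precisely enough to sketch it: prefix the reference and neighbor trajectory columns, then outer-join on ID and frame. A toy illustration (the column values are invented):

```python
import pandas as pd

# Toy trajectory tables; real ones are read from per-position CSV files.
ref = pd.DataFrame({'TRACK_ID': [1, 2], 'FRAME': [0, 0], 'area': [50., 80.]})
neigh = pd.DataFrame({'TRACK_ID': [7, 8], 'FRAME': [0, 0], 'area': [30., 40.]})

# Prefix columns to avoid clashes, as the notes describe.
ref = ref.add_prefix('reference_')
neigh = neigh.add_prefix('neighbor_')

pairs = pd.DataFrame({'REFERENCE_ID': [1, 2], 'NEIGHBOR_ID': [7, 8], 'FRAME': [0, 0]})
pairs = pairs.merge(ref, how='outer', left_on=['REFERENCE_ID', 'FRAME'],
                    right_on=['reference_TRACK_ID', 'reference_FRAME'])
pairs = pairs.merge(neigh, how='outer', left_on=['NEIGHBOR_ID', 'FRAME'],
                    right_on=['neighbor_TRACK_ID', 'neighbor_FRAME'])
```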