celldetective 1.3.0.post1__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. celldetective/_version.py +1 -1
  2. celldetective/events.py +88 -11
  3. celldetective/extra_properties.py +5 -1
  4. celldetective/gui/InitWindow.py +35 -9
  5. celldetective/gui/classifier_widget.py +99 -23
  6. celldetective/gui/control_panel.py +7 -1
  7. celldetective/gui/generic_signal_plot.py +161 -2
  8. celldetective/gui/gui_utils.py +90 -1
  9. celldetective/gui/layouts.py +128 -7
  10. celldetective/gui/measurement_options.py +3 -3
  11. celldetective/gui/plot_signals_ui.py +8 -3
  12. celldetective/gui/process_block.py +77 -32
  13. celldetective/gui/retrain_segmentation_model_options.py +24 -10
  14. celldetective/gui/signal_annotator.py +53 -26
  15. celldetective/gui/signal_annotator2.py +17 -30
  16. celldetective/gui/survival_ui.py +24 -3
  17. celldetective/gui/tableUI.py +300 -183
  18. celldetective/gui/viewers.py +263 -3
  19. celldetective/io.py +56 -3
  20. celldetective/links/zenodo.json +136 -123
  21. celldetective/measure.py +3 -0
  22. celldetective/models/tracking_configs/biased_motion.json +68 -0
  23. celldetective/models/tracking_configs/no_z_motion.json +202 -0
  24. celldetective/neighborhood.py +154 -69
  25. celldetective/preprocessing.py +172 -3
  26. celldetective/relative_measurements.py +128 -4
  27. celldetective/scripts/measure_cells.py +3 -3
  28. celldetective/signals.py +212 -215
  29. celldetective/tracking.py +7 -3
  30. celldetective/utils.py +22 -6
  31. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/METADATA +3 -3
  32. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/RECORD +36 -34
  33. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/WHEEL +1 -1
  34. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/LICENSE +0 -0
  35. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/entry_points.txt +0 -0
  36. {celldetective-1.3.0.post1.dist-info → celldetective-1.3.2.dist-info}/top_level.txt +0 -0
celldetective/preprocessing.py
@@ -6,12 +6,13 @@ from tqdm import tqdm
 import numpy as np
 import os
 from celldetective.io import get_config, get_experiment_wells, interpret_wells_and_positions, extract_well_name_and_number, get_positions_in_well, extract_position_name, get_position_movie_path, load_frames, auto_load_number_of_frames
-from celldetective.utils import estimate_unreliable_edge, unpad, ConfigSectionMap, _extract_channel_indices_from_config, _extract_nbr_channels_from_config, _get_img_num_per_channel
+from celldetective.utils import interpolate_nan, estimate_unreliable_edge, unpad, ConfigSectionMap, _extract_channel_indices_from_config, _extract_nbr_channels_from_config, _get_img_num_per_channel
 from celldetective.segmentation import filter_image, threshold_image
 from csbdeep.io import save_tiff_imagej_compatible
 from gc import collect
 from lmfit import Parameters, Model
 import tifffile.tifffile as tiff
+from scipy.ndimage import shift
 
 def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True):
 
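Reviewer note: the new scipy.ndimage.shift import backs the channel-offset correction added further down. A minimal sketch of the two behaviors the new code relies on (toy array, not celldetective data):

    import numpy as np
    from scipy.ndimage import shift

    img = np.arange(16, dtype=float).reshape(4, 4)
    # Positive offsets move content down (rows) and right (columns);
    # vacated pixels are filled with cval=0 by default.
    shifted = shift(img, [1, 2])
    # By default shift() spline-prefilters the whole image, so a single NaN
    # contaminates every output pixel; prefilter=False keeps the interpolation
    # local, so NaNs only spread to their immediate neighborhood.
    shifted_local = shift(img, [1, 2], prefilter=False)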
@@ -879,7 +880,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 
     stack_length_auto = auto_load_number_of_frames(stack_path)
     if stack_length_auto is None and stack_length is None:
-        print('stack length not provided')
+        print('Stack length not provided...')
         return None
     if stack_length_auto is not None:
         stack_length = stack_length_auto
@@ -899,6 +900,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 
             frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
             target_img = frames[:,:,target_channel_index].copy()
+
             correction = field_correction(target_img, threshold_on_std=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
             frames[:,:,target_channel_index] = correction.copy()
 
@@ -919,6 +921,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
 
         frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
         target_img = frames[:,:,target_channel_index].copy()
+
        correction = field_correction(target_img, threshold_on_std=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
         frames[:,:,target_channel_index] = correction.copy()
 
@@ -981,6 +984,8 @@ def field_correction(img, threshold_on_std=1, operation='divide', model='parabol
     """
 
     target_copy = img.copy().astype(float)
+    if np.percentile(target_copy.flatten(),99.9)==0.0:
+        return target_copy
 
     std_frame = filter_image(target_copy,filters=activation_protocol)
     edge = estimate_unreliable_edge(activation_protocol)
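The new guard returns blank frames untouched: if even the 99.9th percentile of pixel values is 0, there is nothing to fit. A minimal illustration of the check (toy array):

    import numpy as np

    frame = np.zeros((64, 64), dtype=float)
    frame[0, 0] = 12.0  # a lone hot pixel
    # 99.9% of the 4096 pixels are zero, so the percentile is still 0.0
    # and the frame is flagged as effectively empty.
    assert np.percentile(frame.flatten(), 99.9) == 0.0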
@@ -1050,4 +1055,168 @@ def fit_background_model(img, cell_masks=None, model='paraboloid', edge_exclusio
     if bg is not None:
         bg = np.array(bg)
 
-    return bg
+    return bg
+
+
+def correct_channel_offset(
+        experiment,
+        well_option='*',
+        position_option='*',
+        target_channel="channel_name",
+        correction_horizontal=0,
+        correction_vertical=0,
+        show_progress_per_well=True,
+        show_progress_per_pos=False,
+        export=False,
+        return_stacks=False,
+        movie_prefix=None,
+        export_prefix='Corrected',
+        return_stack=True,
+        **kwargs,
+        ):
+
+    config = get_config(experiment)
+    wells = get_experiment_wells(experiment)
+    len_movie = float(ConfigSectionMap(config,"MovieSettings")["len_movie"])
+    if movie_prefix is None:
+        movie_prefix = ConfigSectionMap(config,"MovieSettings")["movie_prefix"]
+
+    well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
+    channel_indices = _extract_channel_indices_from_config(config, [target_channel])
+    nbr_channels = _extract_nbr_channels_from_config(config)
+    img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
+
+    stacks = []
+
+    for k, well_path in enumerate(tqdm(wells[well_indices], disable=not show_progress_per_well)):
+
+        well_name, _ = extract_well_name_and_number(well_path)
+        positions = get_positions_in_well(well_path)
+        selection = positions[position_indices]
+        if isinstance(selection[0],np.ndarray):
+            selection = selection[0]
+
+        for pidx,pos_path in enumerate(tqdm(selection, disable=not show_progress_per_pos)):
+
+            stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
+            print(f'Applying the correction to position {extract_position_name(pos_path)}...')
+            len_movie_auto = auto_load_number_of_frames(stack_path)
+            if len_movie_auto is not None:
+                len_movie = len_movie_auto
+                img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
+
+            corrected_stack = correct_channel_offset_single_stack(stack_path,
+                                                                  target_channel_index=channel_indices[0],
+                                                                  nbr_channels=nbr_channels,
+                                                                  stack_length=len_movie,
+                                                                  correction_vertical=correction_vertical,
+                                                                  correction_horizontal=correction_horizontal,
+                                                                  export=export,
+                                                                  prefix=export_prefix,
+                                                                  return_stacks=return_stacks,
+                                                                  )
+            print('Correction successful.')
+            if return_stacks:
+                stacks.append(corrected_stack)
+            else:
+                del corrected_stack
+                collect()
+
+    if return_stacks:
+        return stacks
+
+
+def correct_channel_offset_single_stack(stack_path,
+                                        target_channel_index=0,
+                                        nbr_channels=1,
+                                        stack_length=45,
+                                        correction_vertical=0,
+                                        correction_horizontal=0,
+                                        export=False,
+                                        prefix="Corrected",
+                                        return_stacks=True,
+                                        ):
+
+    assert os.path.exists(stack_path),f"The stack {stack_path} does not exist... Abort."
+
+    stack_length_auto = auto_load_number_of_frames(stack_path)
+    if stack_length_auto is None and stack_length is None:
+        print('Stack length not provided...')
+        return None
+    if stack_length_auto is not None:
+        stack_length = stack_length_auto
+
+    corrected_stack = []
+
+    if export:
+        path,file = os.path.split(stack_path)
+        if prefix is None:
+            newfile = 'temp_'+file
+        else:
+            newfile = '_'.join([prefix,file])
+
+        with tiff.TiffWriter(os.sep.join([path,newfile]),imagej=True) as tif:
+
+            for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):
+
+                frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
+                target_img = frames[:,:,target_channel_index].copy()
+
+                if np.percentile(target_img.flatten(), 99.9)==0.0:
+                    correction = target_img
+                elif np.any(target_img.flatten()!=target_img.flatten()):
+                    # Routine to interpolate NaN for the spline filter then mask it again
+                    target_interp = interpolate_nan(target_img)
+                    correction = shift(target_interp, [correction_vertical, correction_horizontal])
+                    correction_nan = shift(target_img, [correction_vertical, correction_horizontal], prefilter=False)
+                    nan_i, nan_j = np.where(correction_nan!=correction_nan)
+                    correction[nan_i, nan_j] = np.nan
+                else:
+                    correction = shift(target_img, [correction_vertical, correction_horizontal])
+
+                frames[:,:,target_channel_index] = correction.copy()
+
+                if return_stacks:
+                    corrected_stack.append(frames)
+
+                if export:
+                    tif.write(np.moveaxis(frames,-1,0).astype(np.dtype('f')), contiguous=True)
+                del frames
+                del target_img
+                del correction
+                collect()
+
+        if prefix is None:
+            os.replace(os.sep.join([path,newfile]), os.sep.join([path,file]))
+    else:
+        for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):
+
+            frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
+            target_img = frames[:,:,target_channel_index].copy()
+
+            if np.percentile(target_img.flatten(), 99.9)==0.0:
+                correction = target_img
+            elif np.any(target_img.flatten()!=target_img.flatten()):
+                # Routine to interpolate NaN for the spline filter then mask it again
+                target_interp = interpolate_nan(target_img)
+                correction = shift(target_interp, [correction_vertical, correction_horizontal])
+                correction_nan = shift(target_img, [correction_vertical, correction_horizontal], prefilter=False)
+                nan_i, nan_j = np.where(correction_nan!=correction_nan)
+                correction[nan_i, nan_j] = np.nan
+            else:
+                correction = shift(target_img, [correction_vertical, correction_horizontal])
+
+            frames[:,:,target_channel_index] = correction.copy()
+
+            corrected_stack.append(frames)
+
+            del frames
+            del target_img
+            del correction
+            collect()
+
+    if return_stacks:
+        return np.array(corrected_stack)
+    else:
+        return None
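For orientation, a usage sketch of the new API, assuming it is exposed from celldetective.preprocessing as the hunk context suggests (the experiment path and channel name below are placeholders):

    from celldetective.preprocessing import correct_channel_offset

    stacks = correct_channel_offset(
        "/path/to/experiment",         # placeholder experiment folder
        well_option='*',
        position_option='*',
        target_channel="brightfield",  # placeholder channel name
        correction_vertical=2,         # shift down by 2 px
        correction_horizontal=-1,      # shift left by 1 px
        export=True,                   # write a "Corrected_"-prefixed stack
        return_stacks=False,
    )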
celldetective/relative_measurements.py
@@ -90,6 +90,10 @@ def measure_pairs(pos, neighborhood_protocol):
             cosine_dot_vector[:] = np.nan
 
             coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()[0]
+            intersection = np.nan
+            if 'intersection' in list(group_neigh.columns):
+                intersection = group_neigh['intersection'].values[0]
+
             neighbor_vector[0] = coords_neighbor[0] - coords_reference[0]
             neighbor_vector[1] = coords_neighbor[1] - coords_reference[1]
 
@@ -109,7 +113,7 @@ def measure_pairs(pos, neighborhood_protocol):
                 {'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
                  'reference_population': reference_population,
                  'neighbor_population': neighbor_population,
-                 'FRAME': t, 'distance': relative_distance,
+                 'FRAME': t, 'distance': relative_distance, 'intersection': intersection,
                  'angle': angle * 180 / np.pi,
                  f'status_{neighborhood_description}': 1,
                  f'class_{neighborhood_description}': 0,
@@ -201,9 +205,14 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
         neighbor_dicts = group.loc[: , f'{neighborhood_description}'].values
         timeline_reference = group['FRAME'].to_numpy()
         coords_reference = group[['POSITION_X', 'POSITION_Y']].to_numpy()
+        if "area" in list(group.columns):
+            ref_area = group['area'].to_numpy()
+        else:
+            ref_area = [np.nan]*len(coords_reference)
 
         neighbor_ids = []
         neighbor_ids_per_t = []
+        intersection_values = []
 
         time_of_first_entrance_in_neighborhood = {}
         t_departure={}
@@ -218,10 +227,16 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
             for neigh in neighbors_at_t:
                 if neigh['id'] not in neighbor_ids:
                     time_of_first_entrance_in_neighborhood[neigh['id']]=t
+                    if 'intersection' in neigh:
+                        intersection_values.append({"frame": t, "neigh_id": neigh['id'], "intersection": neigh['intersection']})
+                    else:
+                        intersection_values.append({"frame": t, "neigh_id": neigh['id'], "intersection": np.nan})
                     neighbor_ids.append(neigh['id'])
                 neighs_t.append(neigh['id'])
             neighbor_ids_per_t.append(neighs_t)
 
+        intersection_values = pd.DataFrame(intersection_values)
+
         #print(neighbor_ids_per_t)
         unique_neigh = list(np.unique(neighbor_ids))
         print(f'Reference cell {tid}: found {len(unique_neigh)} neighbour cells: {unique_neigh}...')
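The per-frame intersection records are accumulated as dicts and then materialized as a DataFrame so they can be queried by neighbor and frame later in the function. A sketch of the pattern with invented values:

    import numpy as np
    import pandas as pd

    records = [
        {"frame": 0, "neigh_id": 7, "intersection": 34.0},
        {"frame": 1, "neigh_id": 7, "intersection": np.nan},
    ]
    intersection_values = pd.DataFrame(records)

    # Lookup, as done in the hunk at -319/+342 below:
    sel = intersection_values.loc[
        (intersection_values['neigh_id'] == 7) & (intersection_values['frame'] == 0),
        'intersection'].values
    inter = sel[0] if len(sel) else np.nan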
@@ -232,6 +247,11 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
 
             coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()
             timeline_neighbor = group_neigh['FRAME'].to_numpy()
+            if "area" in list(group_neigh.columns):
+                neigh_area = group_neigh['area'].to_numpy()
+            else:
+                neigh_area = [np.nan]*len(timeline_neighbor)
+
 
             # # Perform timeline matching to have same start-end points and no gaps
             full_timeline, _, _ = timeline_matching(timeline_reference, timeline_neighbor)
@@ -239,6 +259,9 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
             neighbor_vector = np.zeros((len(full_timeline), 2))
             neighbor_vector[:,:] = np.nan
 
+            intersection_vector = np.zeros((len(full_timeline)))
+            intersection_vector[:] = np.nan
+
             centre_of_mass_columns = [(c,c.replace('POSITION_X','POSITION_Y')) for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]
             centre_of_mass_labels = [c.replace('_centre_of_mass_POSITION_X','') for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]
 
@@ -319,7 +342,20 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
 
                 if t in timeline_reference: # meaning position exists on both sides
 
-                    idx_reference = list(timeline_reference).index(t)
+                    idx_reference = list(timeline_reference).index(t)
+                    inter = intersection_values.loc[(intersection_values['neigh_id']==nc)&(intersection_values["frame"]==t),"intersection"].values
+                    if len(inter)==0:
+                        inter = np.nan
+                    else:
+                        inter = inter[0]
+
+                    neigh_inter_fraction = np.nan
+                    if inter==inter and neigh_area[t]==neigh_area[t]:
+                        neigh_inter_fraction = inter / neigh_area[t]
+
+                    ref_inter_fraction = np.nan
+                    if inter==inter and ref_area[t]==ref_area[t]:
+                        ref_inter_fraction = inter / ref_area[t]
 
                     if nc in neighbor_ids_per_t[idx_reference]:
 
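The inter==inter and neigh_area[t]==neigh_area[t] comparisons above are NaN checks: NaN is the only float value that compares unequal to itself, so x == x is False exactly when x is NaN. A one-line illustration:

    import numpy as np

    assert not (np.nan == np.nan)   # NaN fails self-equality
    assert 3.5 == 3.5               # ordinary floats pass
    # so `if inter == inter:` is equivalent to `if not np.isnan(inter):`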
@@ -328,7 +364,7 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
                         {'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
                          'reference_population': reference_population,
                          'neighbor_population': neighbor_population,
-                         'FRAME': t, 'distance': relative_distance[t],
+                         'FRAME': t, 'distance': relative_distance[t], 'intersection': inter, 'reference_frac_area_intersection': ref_inter_fraction, 'neighbor_frac_area_intersection': neigh_inter_fraction,
                          'velocity': rel_velocity[t],
                          'velocity_smooth': rel_velocity_long_timescale[t],
                          'angle': angle[t] * 180 / np.pi,
@@ -349,7 +385,7 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
                         {'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
                          'reference_population': reference_population,
                          'neighbor_population': neighbor_population,
-                         'FRAME': t, 'distance': relative_distance[t],
+                         'FRAME': t, 'distance': relative_distance[t], 'intersection': inter, 'reference_frac_area_intersection': ref_inter_fraction, 'neighbor_frac_area_intersection': neigh_inter_fraction,
                          'velocity': rel_velocity[t],
                          'velocity_smooth': rel_velocity_long_timescale[t],
                          'angle': angle[t] * 180 / np.pi,
@@ -642,4 +678,92 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
     return neigh_protocol
 
 
+def expand_pair_table(data):
+
+    """
+    Expands a pair table by merging reference and neighbor trajectory data from CSV files based on the specified
+    reference and neighbor populations, and their associated positions and frames.
+
+    Parameters
+    ----------
+    data : pandas.DataFrame
+        DataFrame containing the pair table, which should include the columns:
+        - 'reference_population': The reference population type.
+        - 'neighbor_population': The neighbor population type.
+        - 'position': The position identifier for each pair.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Expanded DataFrame that includes merged reference and neighbor data, sorted by position, reference population,
+        neighbor population, and frame. Rows without values in 'REFERENCE_ID', 'NEIGHBOR_ID', 'reference_population',
+        or 'neighbor_population' are dropped.
+
+    Notes
+    -----
+    - For each unique pair of `reference_population` and `neighbor_population`, the function identifies corresponding
+      trajectories CSV files based on the position identifier.
+    - The function reads the trajectories CSV files, prefixes columns with 'reference_' or 'neighbor_' to avoid
+      conflicts, and merges data from reference and neighbor tables based on `TRACK_ID` or `ID`, and `FRAME`.
+    - Merges are performed in an outer join manner to retain all rows, regardless of missing values in the target files.
+    - The final DataFrame is sorted and cleaned to ensure only valid pairings are included.
+
+    Example
+    -------
+    >>> expanded_df = expand_pair_table(pair_table)
+    >>> expanded_df.head()
+
+    Raises
+    ------
+    AssertionError
+        If 'reference_population' or 'neighbor_population' is not found in the columns of `data`.
+    """
+
+    assert 'reference_population' in list(data.columns),"Please provide a valid pair table..."
+    assert 'neighbor_population' in list(data.columns),"Please provide a valid pair table..."
+
+    expanded_table = []
+
+    for neigh, group in data.groupby(['reference_population','neighbor_population']):
+
+        ref_pop = neigh[0]; neigh_pop = neigh[1];
+
+        for pos,pos_group in group.groupby('position'):
+
+            ref_tab = os.sep.join([pos,'output','tables',f'trajectories_{ref_pop}.csv'])
+            neigh_tab = os.sep.join([pos,'output','tables',f'trajectories_{neigh_pop}.csv'])
+
+            if os.path.exists(ref_tab):
+                df_ref = pd.read_csv(ref_tab)
+                if 'TRACK_ID' in df_ref.columns:
+                    if not np.all(df_ref['TRACK_ID'].isnull()):
+                        ref_merge_cols = ['TRACK_ID','FRAME']
+                    else:
+                        ref_merge_cols = ['ID','FRAME']
+                else:
+                    ref_merge_cols = ['ID','FRAME']
+
+            if os.path.exists(neigh_tab):
+                df_neigh = pd.read_csv(neigh_tab)
+                if 'TRACK_ID' in df_neigh.columns:
+                    if not np.all(df_neigh['TRACK_ID'].isnull()):
+                        neigh_merge_cols = ['TRACK_ID','FRAME']
+                    else:
+                        neigh_merge_cols = ['ID','FRAME']
+                else:
+                    neigh_merge_cols = ['ID','FRAME']
+
+            df_ref = df_ref.add_prefix('reference_',axis=1)
+            df_neigh = df_neigh.add_prefix('neighbor_',axis=1)
+            ref_merge_cols = ['reference_'+c for c in ref_merge_cols]
+            neigh_merge_cols = ['neighbor_'+c for c in neigh_merge_cols]
+
+            merge_ref = pos_group.merge(df_ref, how='outer', left_on=['REFERENCE_ID','FRAME'], right_on=ref_merge_cols, suffixes=('', '_reference'))
+            merge_neigh = merge_ref.merge(df_neigh, how='outer', left_on=['NEIGHBOR_ID','FRAME'], right_on=neigh_merge_cols, suffixes=('_reference', '_neighbor'))
+            expanded_table.append(merge_neigh)
+
+    df_expanded = pd.concat(expanded_table, axis=0, ignore_index = True)
+    df_expanded = df_expanded.sort_values(by=['position', 'reference_population','neighbor_population','REFERENCE_ID','NEIGHBOR_ID','FRAME'])
+    df_expanded = df_expanded.dropna(axis=0, subset=['REFERENCE_ID','NEIGHBOR_ID','reference_population','neighbor_population'])
 
+    return df_expanded
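A toy sketch of the merge mechanics used by expand_pair_table (column names abbreviated, data invented): prefixing the trajectory table renames its keys, so the merge pairs the pair table's REFERENCE_ID/FRAME with reference_TRACK_ID/reference_FRAME.

    import pandas as pd

    pairs = pd.DataFrame({'REFERENCE_ID': [0, 0], 'NEIGHBOR_ID': [1, 2], 'FRAME': [0, 0]})
    ref = pd.DataFrame({'TRACK_ID': [0], 'FRAME': [0], 'area': [120.0]}).add_prefix('reference_')

    out = pairs.merge(ref, how='outer',
                      left_on=['REFERENCE_ID', 'FRAME'],
                      right_on=['reference_TRACK_ID', 'reference_FRAME'])
    # out now carries reference_area alongside each pair row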
celldetective/scripts/measure_cells.py
@@ -251,14 +251,14 @@ def measure_index(indices):
         iso_table = measure_isotropic_intensity(positions_at_t, img, channels=channel_names, intensity_measurement_radii=intensity_measurement_radii, column_labels=column_labels, operations=isotropic_operations, verbose=False)
 
         if do_iso_intensities and do_features:
-            measurements_at_t = iso_table.merge(feature_table, how='outer', on='class_id',suffixes=('', '_delme'))
+            measurements_at_t = iso_table.merge(feature_table, how='outer', on='class_id',suffixes=('_delme', ''))
             measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]
         elif do_iso_intensities * (not do_features):
             measurements_at_t = iso_table
         elif do_features:
-            measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id',suffixes=('', '_delme'))
+            measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id',suffixes=('_delme', ''))
             measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]
-
+
         center_of_mass_x_cols = [c for c in list(measurements_at_t.columns) if c.endswith('centre_of_mass_x')]
         center_of_mass_y_cols = [c for c in list(measurements_at_t.columns) if c.endswith('centre_of_mass_y')]
         for c in center_of_mass_x_cols:
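The suffix swap above changes which side keeps the unsuffixed names when both tables share a column: with suffixes=('_delme', ''), the overlapping columns coming from feature_table survive untouched and the left-hand duplicates are tagged and dropped. A minimal demonstration (toy columns):

    import pandas as pd

    left = pd.DataFrame({'class_id': [1], 'area': [10.0]})
    right = pd.DataFrame({'class_id': [1], 'area': [12.5]})

    merged = left.merge(right, how='outer', on='class_id', suffixes=('_delme', ''))
    merged = merged[[c for c in merged.columns if not c.endswith('_delme')]]
    print(merged)  # keeps 'area' from the right-hand table (12.5)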