celldetective 1.3.9.post5__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. celldetective/__init__.py +0 -3
  2. celldetective/_version.py +1 -1
  3. celldetective/events.py +2 -4
  4. celldetective/extra_properties.py +132 -0
  5. celldetective/gui/InitWindow.py +33 -45
  6. celldetective/gui/__init__.py +1 -0
  7. celldetective/gui/about.py +19 -15
  8. celldetective/gui/analyze_block.py +34 -19
  9. celldetective/gui/base_components.py +23 -0
  10. celldetective/gui/btrack_options.py +26 -34
  11. celldetective/gui/classifier_widget.py +68 -81
  12. celldetective/gui/configure_new_exp.py +113 -17
  13. celldetective/gui/control_panel.py +68 -141
  14. celldetective/gui/generic_signal_plot.py +9 -12
  15. celldetective/gui/gui_utils.py +49 -21
  16. celldetective/gui/json_readers.py +5 -4
  17. celldetective/gui/layouts.py +246 -22
  18. celldetective/gui/measurement_options.py +32 -17
  19. celldetective/gui/neighborhood_options.py +10 -13
  20. celldetective/gui/plot_measurements.py +21 -17
  21. celldetective/gui/plot_signals_ui.py +125 -72
  22. celldetective/gui/process_block.py +180 -123
  23. celldetective/gui/processes/compute_neighborhood.py +594 -0
  24. celldetective/gui/processes/measure_cells.py +5 -0
  25. celldetective/gui/processes/segment_cells.py +27 -6
  26. celldetective/gui/processes/track_cells.py +6 -0
  27. celldetective/gui/retrain_segmentation_model_options.py +12 -20
  28. celldetective/gui/retrain_signal_model_options.py +57 -56
  29. celldetective/gui/seg_model_loader.py +21 -62
  30. celldetective/gui/signal_annotator.py +129 -70
  31. celldetective/gui/signal_annotator2.py +431 -635
  32. celldetective/gui/signal_annotator_options.py +8 -11
  33. celldetective/gui/survival_ui.py +49 -95
  34. celldetective/gui/tableUI.py +28 -25
  35. celldetective/gui/thresholds_gui.py +617 -1221
  36. celldetective/gui/viewers.py +106 -39
  37. celldetective/gui/workers.py +9 -3
  38. celldetective/io.py +57 -20
  39. celldetective/measure.py +63 -27
  40. celldetective/neighborhood.py +342 -268
  41. celldetective/preprocessing.py +25 -17
  42. celldetective/relative_measurements.py +50 -29
  43. celldetective/scripts/analyze_signals.py +4 -1
  44. celldetective/scripts/measure_relative.py +4 -1
  45. celldetective/scripts/segment_cells.py +0 -6
  46. celldetective/scripts/track_cells.py +3 -1
  47. celldetective/scripts/train_segmentation_model.py +7 -4
  48. celldetective/signals.py +29 -14
  49. celldetective/tracking.py +7 -2
  50. celldetective/utils.py +36 -8
  51. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/METADATA +24 -16
  52. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/RECORD +57 -55
  53. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/WHEEL +1 -1
  54. tests/test_qt.py +21 -21
  55. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/entry_points.txt +0 -0
  56. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info/licenses}/LICENSE +0 -0
  57. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/top_level.txt +0 -0
@@ -14,7 +14,7 @@ from lmfit import Parameters, Model
14
14
  import tifffile.tifffile as tiff
15
15
  from scipy.ndimage import shift
16
16
 
17
- def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True):
17
+ def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True, offset=None):
18
18
 
19
19
  """
20
20
  Estimate the background for each condition in an experiment.
@@ -149,6 +149,9 @@ def estimate_background_per_condition(experiment, threshold_on_std=1, well_optio
149
149
 
150
150
  try:
151
151
  background = np.nanmedian(frame_mean_per_position,axis=0)
152
+ if offset is not None:
153
+ #print("The offset is applied to background...")
154
+ background -= offset
152
155
  backgrounds.append({"bg": background, "well": well_path})
153
156
  print(f"Background successfully computed for well {well_name}...")
154
157
  except Exception as e:
@@ -170,6 +173,7 @@ def correct_background_model_free(
170
173
  opt_coef_nbr = 100,
171
174
  operation = 'divide',
172
175
  clip = False,
176
+ offset = None,
173
177
  show_progress_per_well = True,
174
178
  show_progress_per_pos = False,
175
179
  export = False,
@@ -259,7 +263,7 @@ def correct_background_model_free(
259
263
  well_name, _ = extract_well_name_and_number(well_path)
260
264
 
261
265
  try:
262
- background = estimate_background_per_condition(experiment, threshold_on_std=threshold_on_std, well_option=int(well_indices[k]), target_channel=target_channel, frame_range=frame_range, mode=mode, show_progress_per_pos=True, show_progress_per_well=False, activation_protocol=activation_protocol)
266
+ background = estimate_background_per_condition(experiment, threshold_on_std=threshold_on_std, well_option=int(well_indices[k]), target_channel=target_channel, frame_range=frame_range, mode=mode, show_progress_per_pos=True, show_progress_per_well=False, activation_protocol=activation_protocol, offset=offset)
263
267
  background = background[0]
264
268
  background = background['bg']
265
269
  except Exception as e:
@@ -283,18 +287,19 @@ def correct_background_model_free(
283
287
 
284
288
  corrected_stack = apply_background_to_stack(stack_path,
285
289
  background,
286
- target_channel_index=channel_indices[0],
287
- nbr_channels=nbr_channels,
288
- stack_length=len_movie,
289
- threshold_on_std=threshold_on_std,
290
- optimize_option=optimize_option,
291
- opt_coef_range=opt_coef_range,
292
- opt_coef_nbr=opt_coef_nbr,
293
- operation=operation,
294
- clip=clip,
295
- export=export,
296
- activation_protocol=activation_protocol,
297
- prefix=export_prefix,
290
+ target_channel_index = channel_indices[0],
291
+ nbr_channels = nbr_channels,
292
+ stack_length = len_movie,
293
+ threshold_on_std = threshold_on_std,
294
+ optimize_option = optimize_option,
295
+ opt_coef_range = opt_coef_range,
296
+ opt_coef_nbr = opt_coef_nbr,
297
+ operation = operation,
298
+ clip = clip,
299
+ offset = offset,
300
+ export = export,
301
+ activation_protocol = activation_protocol,
302
+ prefix = export_prefix,
298
303
  )
299
304
  print('Correction successful.')
300
305
  if return_stacks:
@@ -310,7 +315,7 @@ def correct_background_model_free(
310
315
 
311
316
 
312
317
 
313
- def apply_background_to_stack(stack_path, background, target_channel_index=0, nbr_channels=1, stack_length=45,activation_protocol=[['gauss',2],['std',4]], threshold_on_std=1, optimize_option=True, opt_coef_range=(0.95,1.05), opt_coef_nbr=100, operation='divide', clip=False, export=False, prefix="Corrected"):
318
+ def apply_background_to_stack(stack_path, background, target_channel_index=0, nbr_channels=1, stack_length=45, offset = None, activation_protocol=[['gauss',2],['std',4]], threshold_on_std=1, optimize_option=True, opt_coef_range=(0.95,1.05), opt_coef_nbr=100, operation='divide', clip=False, export=False, prefix="Corrected"):
314
319
 
315
320
  """
316
321
  Apply background correction to an image stack.
@@ -385,11 +390,14 @@ def apply_background_to_stack(stack_path, background, target_channel_index=0, nb
385
390
 
386
391
  frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
387
392
  target_img = frames[:,:,target_channel_index].copy()
388
-
393
+ if offset is not None:
394
+ #print(f"The offset is applied to image...")
395
+ target_img -= offset
396
+
389
397
  if optimize_option:
390
398
 
391
399
  target_copy = target_img.copy()
392
-
400
+
393
401
  std_frame = filter_image(target_copy.copy(),filters=activation_protocol)
394
402
  edge = estimate_unreliable_edge(activation_protocol)
395
403
  mask = threshold_image(std_frame, threshold_on_std, np.inf, foreground_value=1, edge_exclusion=edge)
@@ -396,7 +396,7 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
396
396
  return df_pairs
397
397
 
398
398
  except KeyError:
399
- print(f"Neighborhood {description} not found in data frame. Measurements for this neighborhood will not be calculated")
399
+ print(f"Neighborhood not found in data frame. Measurements for this neighborhood will not be calculated")
400
400
 
401
401
 
402
402
  def timeline_matching(timeline1, timeline2):
@@ -531,7 +531,7 @@ def update_effector_table(df_relative, df_effector):
531
531
  df_effector.loc[df_effector['ID'] == effector, 'group_neighborhood'] = 0
532
532
  return df_effector
533
533
 
534
- def extract_neighborhoods_from_pickles(pos):
534
+ def extract_neighborhoods_from_pickles(pos, populations=['targets','effectors']):
535
535
 
536
536
  """
537
537
  Extract neighborhood protocols from pickle files located at a given position.
@@ -573,29 +573,40 @@ def extract_neighborhoods_from_pickles(pos):
573
573
 
574
574
  """
575
575
 
576
- tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
577
- if os.path.exists(tab_tc):
578
- df_targets = np.load(tab_tc, allow_pickle=True)
579
- else:
580
- df_targets = None
581
- if os.path.exists(tab_tc.replace('targets','effectors')):
582
- df_effectors = np.load(tab_tc.replace('targets','effectors'), allow_pickle=True)
583
- else:
584
- df_effectors = None
585
-
586
- neighborhood_protocols=[]
587
-
588
- if df_targets is not None:
589
- for column in list(df_targets.columns):
590
- if column.startswith('neighborhood'):
591
- neigh_protocol = extract_neighborhood_settings(column, population='targets')
592
- neighborhood_protocols.append(neigh_protocol)
593
-
594
- if df_effectors is not None:
595
- for column in list(df_effectors.columns):
596
- if column.startswith('neighborhood'):
597
- neigh_protocol = extract_neighborhood_settings(column, population='effectors')
598
- neighborhood_protocols.append(neigh_protocol)
576
+ neighborhood_protocols = []
577
+
578
+ for pop in populations:
579
+ tab_pop = pos + os.sep.join(['output', 'tables', f'trajectories_{pop}.pkl'])
580
+ if os.path.exists(tab_pop):
581
+ df_pop = np.load(tab_pop, allow_pickle=True)
582
+ for column in list(df_pop.columns):
583
+ if column.startswith('neighborhood'):
584
+ neigh_protocol = extract_neighborhood_settings(column, population=pop)
585
+ neighborhood_protocols.append(neigh_protocol)
586
+
587
+ # tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
588
+ # if os.path.exists(tab_tc):
589
+ # df_targets = np.load(tab_tc, allow_pickle=True)
590
+ # else:
591
+ # df_targets = None
592
+ # if os.path.exists(tab_tc.replace('targets','effectors')):
593
+ # df_effectors = np.load(tab_tc.replace('targets','effectors'), allow_pickle=True)
594
+ # else:
595
+ # df_effectors = None
596
+
597
+ # neighborhood_protocols=[]
598
+
599
+ # if df_targets is not None:
600
+ # for column in list(df_targets.columns):
601
+ # if column.startswith('neighborhood'):
602
+ # neigh_protocol = extract_neighborhood_settings(column, population='targets')
603
+ # neighborhood_protocols.append(neigh_protocol)
604
+
605
+ # if df_effectors is not None:
606
+ # for column in list(df_effectors.columns):
607
+ # if column.startswith('neighborhood'):
608
+ # neigh_protocol = extract_neighborhood_settings(column, population='effectors')
609
+ # neighborhood_protocols.append(neigh_protocol)
599
610
 
600
611
  return neighborhood_protocols
601
612
 
@@ -646,10 +657,18 @@ def extract_neighborhood_settings(neigh_string, population='targets'):
646
657
  """
647
658
 
648
659
  assert neigh_string.startswith('neighborhood')
649
- if population=='targets':
650
- neighbor_population = 'effectors'
651
- elif population=='effectors':
652
- neighbor_population = 'targets'
660
+ print(f"{neigh_string=}")
661
+
662
+ if '_(' in neigh_string and ')_' in neigh_string:
663
+ # determine neigh pop from string
664
+ neighbor_population = neigh_string.split('_(')[-1].split(')_')[0].split('-')[-1]
665
+ print(f'{neighbor_population=}')
666
+ else:
667
+ # old method
668
+ if population=='targets':
669
+ neighbor_population = 'effectors'
670
+ elif population=='effectors':
671
+ neighbor_population = 'targets'
653
672
 
654
673
  if 'self' in neigh_string:
655
674
 
@@ -716,6 +735,8 @@ def expand_pair_table(data):
716
735
  assert 'reference_population' in list(data.columns),"Please provide a valid pair table..."
717
736
  assert 'neighbor_population' in list(data.columns),"Please provide a valid pair table..."
718
737
 
738
+ data.__dict__.update(data.astype({'reference_population': str, 'neighbor_population': str}).__dict__)
739
+
719
740
  expanded_table = []
720
741
 
721
742
  for neigh, group in data.groupby(['reference_population','neighbor_population']):
@@ -15,7 +15,7 @@ parser = argparse.ArgumentParser(description="Classify and regress the signals b
15
15
  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
16
16
  parser.add_argument('-p',"--position", required=True, help="Path to the position")
17
17
  parser.add_argument('-m',"--model", required=True, help="Path to the model")
18
- parser.add_argument("--mode", default="target", choices=["target","effector","targets","effectors"],help="Cell population of interest")
18
+ parser.add_argument("--mode", default="target", help="Cell population of interest")
19
19
  parser.add_argument("--use_gpu", default="True", choices=["True","False"],help="use GPU")
20
20
 
21
21
  args = parser.parse_args()
@@ -36,6 +36,9 @@ if mode.lower()=="target" or mode.lower()=="targets":
36
36
 
37
37
  elif mode.lower()=="effector" or mode.lower()=="effectors":
38
38
  table_name = "trajectories_effectors.csv"
39
+ else:
40
+ table_name = f"trajectories_{mode}.csv"
41
+
39
42
 
40
43
  # Load trajectories, add centroid if not in trajectory
41
44
  trajectories = pos+os.sep.join(['output','tables', table_name])
@@ -2,6 +2,7 @@ import argparse
2
2
  import os
3
3
  from celldetective.relative_measurements import measure_pair_signals_at_position, extract_neighborhoods_from_pickles
4
4
  from celldetective.utils import ConfigSectionMap, extract_experiment_channels
5
+ from celldetective.io import get_experiment_populations
5
6
 
6
7
  from pathlib import Path, PurePath
7
8
 
@@ -37,6 +38,8 @@ len_movie = float(ConfigSectionMap(config, "MovieSettings")["len_movie"])
37
38
  channel_names, channel_indices = extract_experiment_channels(expfolder)
38
39
  nbr_channels = len(channel_names)
39
40
 
41
+ populations = get_experiment_populations(expfolder, dtype=str)
42
+
40
43
  # from tracking instructions, fetch btrack config, features, haralick, clean_traj, idea: fetch custom timeline?
41
44
  instr_path = PurePath(expfolder, Path(f"{instruction_file}"))
42
45
  previous_pair_table_path = pos + os.sep.join(['output', 'tables', 'trajectories_pairs.csv'])
@@ -46,7 +49,7 @@ previous_neighborhoods = []
46
49
  associated_reference_population = []
47
50
 
48
51
 
49
- neighborhoods_to_measure = extract_neighborhoods_from_pickles(pos)
52
+ neighborhoods_to_measure = extract_neighborhoods_from_pickles(pos, populations=populations)
50
53
  all_df_pairs = []
51
54
  if os.path.exists(previous_pair_table_path):
52
55
  df_0 = pd.read_csv(previous_pair_table_path)
@@ -183,12 +183,6 @@ with concurrent.futures.ThreadPoolExecutor() as executor:
183
183
  print("Exception: ", e)
184
184
 
185
185
  print('Done.')
186
-
187
- try:
188
- del model
189
- except:
190
- pass
191
-
192
186
  gc.collect()
193
187
 
194
188
 
@@ -6,7 +6,9 @@ import argparse
6
6
  import datetime
7
7
  import os
8
8
  import json
9
- from celldetective.io import auto_load_number_of_frames, interpret_tracking_configuration, extract_position_name
9
+ from celldetective.io import _load_frames_to_measure, auto_load_number_of_frames, interpret_tracking_configuration, \
10
+ extract_position_name, \
11
+ locate_labels
10
12
  from celldetective.utils import _mask_intensity_measurements, extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, extract_experiment_channels
11
13
  from celldetective.measure import drop_tonal_features, measure_features
12
14
  from celldetective.tracking import track
@@ -124,15 +124,18 @@ if model_type=='cellpose':
124
124
  device = torch.device("cpu")
125
125
  else:
126
126
  print('Using GPU for training...')
127
-
127
+
128
+ diam_mean = 30.0
128
129
  logger, log_file = logger_setup()
129
130
  print(f'Pretrained model: ',pretrained)
130
131
  if pretrained is not None:
132
+ if pretrained.endswith('CP_nuclei'):
133
+ diam_mean = 17.0
131
134
  pretrained_path = os.sep.join([pretrained,os.path.split(pretrained)[-1]])
132
135
  else:
133
136
  pretrained_path = pretrained
134
137
 
135
- model = CellposeModel(gpu=use_gpu, model_type=None, pretrained_model=pretrained_path, diam_mean=30.0, nchan=X_aug[0].shape[0],)
138
+ model = CellposeModel(gpu=use_gpu, model_type=None, pretrained_model=pretrained_path, diam_mean=diam_mean, nchan=X_aug[0].shape[0],)
136
139
  model.train(train_data=X_aug, train_labels=Y_aug, normalize=False, channels=None, batch_size=batch_size,
137
140
  min_train_masks=1,save_path=target_directory+os.sep+model_name,n_epochs=epochs, model_name=model_name, learning_rate=learning_rate, test_data = X_val, test_labels=Y_val)
138
141
 
@@ -152,7 +155,7 @@ if model_type=='cellpose':
152
155
  config_inputs = {"channels": target_channels, "diameter": standard_diameter, 'cellprob_threshold': 0., 'flow_threshold': 0.4,
153
156
  'normalization_percentile': normalization_percentile, 'normalization_clip': normalization_clip,
154
157
  'normalization_values': normalization_values, 'model_type': 'cellpose',
155
- 'spatial_calibration': input_spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
158
+ 'spatial_calibration': input_spatial_calibration, 'cell_size_um': round(diameter*input_spatial_calibration,4), 'dataset': {'train': files_train, 'validation': files_val}}
156
159
  json_input_config = json.dumps(config_inputs, indent=4)
157
160
  with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
158
161
  outfile.write(json_input_config)
@@ -227,7 +230,7 @@ elif model_type=='stardist':
227
230
 
228
231
  config_inputs = {"channels": target_channels, 'normalization_percentile': normalization_percentile,
229
232
  'normalization_clip': normalization_clip, 'normalization_values': normalization_values,
230
- 'model_type': 'stardist', 'spatial_calibration': spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
233
+ 'model_type': 'stardist', 'spatial_calibration': spatial_calibration,'cell_size_um': median_size * spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
231
234
 
232
235
  json_input_config = json.dumps(config_inputs, indent=4)
233
236
  with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
celldetective/signals.py CHANGED
@@ -156,6 +156,10 @@ def analyze_signals(trajectories, model, interpolate_na=True,
156
156
  f = open(model_config_path)
157
157
  config = json.load(f)
158
158
  required_signals = config["channels"]
159
+ if 'selected_channels' in config:
160
+ selected_signals = config['selected_channels']
161
+ if np.any([s=='None' for s in selected_signals]):
162
+ trajectories['None'] = 0.
159
163
  model_signal_length = config['model_signal_length']
160
164
 
161
165
  try:
@@ -264,6 +268,8 @@ def analyze_signals(trajectories, model, interpolate_na=True,
264
268
  plt.pause(3)
265
269
  plt.close()
266
270
 
271
+ if "None" in list(trajectories.columns):
272
+ trajectories = trajectories.drop(columns=['None'])
267
273
  return trajectories
268
274
 
269
275
  def analyze_signals_at_position(pos, model, mode, use_gpu=True, return_table=False):
@@ -324,7 +330,7 @@ def analyze_signals_at_position(pos, model, mode, use_gpu=True, return_table=Fal
324
330
  else:
325
331
  return None
326
332
 
327
- def analyze_pair_signals_at_position(pos, model, use_gpu=True):
333
+ def analyze_pair_signals_at_position(pos, model, use_gpu=True, populations=['targets','effectors']):
328
334
 
329
335
 
330
336
  pos = pos.replace('\\','/')
@@ -332,13 +338,10 @@ def analyze_pair_signals_at_position(pos, model, use_gpu=True):
332
338
  assert os.path.exists(pos),f'Position {pos} is not a valid path.'
333
339
  if not pos.endswith('/'):
334
340
  pos += '/'
335
-
336
- df_targets = get_position_pickle(pos, population='targets')
337
- df_effectors = get_position_pickle(pos, population='effectors')
338
- dataframes = {
339
- 'targets': df_targets,
340
- 'effectors': df_effectors,
341
- }
341
+
342
+ dataframes = {}
343
+ for pop in populations:
344
+ dataframes.update({pop: get_position_pickle(pos, population=pop)})
342
345
  df_pairs = get_position_table(pos, population='pairs')
343
346
 
344
347
  # Need to identify expected reference / neighbor tables
@@ -354,12 +357,19 @@ def analyze_pair_signals_at_position(pos, model, use_gpu=True):
354
357
  reference_population = model_config_path['reference_population']
355
358
  neighbor_population = model_config_path['neighbor_population']
356
359
 
360
+ if dataframes[reference_population] is None:
361
+ print(f"No tabulated data can be found for the reference population ({reference_population})... Abort...")
362
+ return None
363
+
364
+ if dataframes[neighbor_population] is None:
365
+ print(f"No tabulated data can be found for the neighbor population ({neighbor_population})... Abort...")
366
+ return None
367
+
357
368
  df = analyze_pair_signals(df_pairs, dataframes[reference_population], dataframes[neighbor_population], model=model)
358
-
359
369
  table = pos + os.sep.join(["output","tables",f"trajectories_pairs.csv"])
360
370
  df.to_csv(table, index=False)
361
371
 
362
- return None
372
+ return None
363
373
 
364
374
 
365
375
  def analyze_pair_signals(trajectories_pairs,trajectories_reference,trajectories_neighbors, model, interpolate_na=True, selected_signals=None,
@@ -2823,7 +2833,7 @@ def columnwise_mean(matrix, min_nbr_values = 1, projection='mean'):
2823
2833
  return mean_line, mean_line_std
2824
2834
 
2825
2835
 
2826
- def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], return_matrix=False, forced_max_duration=None, min_nbr_values=2,conflict_mode='mean', projection='mean'):
2836
+ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], return_matrix=False, forced_max_duration=None, min_nbr_values=2,conflict_mode='mean', projection='mean',pairs=False):
2827
2837
 
2828
2838
  """
2829
2839
  Calculate the mean and standard deviation of a specified signal for tracks of a given class in the input DataFrame.
@@ -2878,14 +2888,19 @@ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], retu
2878
2888
  if isinstance(time_col, (int,float)):
2879
2889
  abs_time = True
2880
2890
 
2881
- n_tracks = len(df.groupby(['position','TRACK_ID']))
2891
+ if not pairs:
2892
+ groupby_cols = ['position','TRACK_ID']
2893
+ else:
2894
+ groupby_cols = ['position','REFERENCE_ID','NEIGHBOR_ID']
2895
+
2896
+ n_tracks = len(df.groupby(groupby_cols))
2882
2897
  signal_matrix = np.zeros((n_tracks,int(max_duration)*2 + 1))
2883
2898
  signal_matrix[:,:] = np.nan
2884
2899
 
2885
- df = df.sort_values(by=['position','TRACK_ID','FRAME'])
2900
+ df = df.sort_values(by=groupby_cols+['FRAME'])
2886
2901
 
2887
2902
  trackid=0
2888
- for track,track_group in df.loc[df[class_col].isin(class_value)].groupby(['position','TRACK_ID']):
2903
+ for track,track_group in df.loc[df[class_col].isin(class_value)].groupby(groupby_cols):
2889
2904
  cclass = track_group[class_col].to_numpy()[0]
2890
2905
  if cclass != 0:
2891
2906
  ref_time = 0
celldetective/tracking.py CHANGED
@@ -1002,11 +1002,12 @@ def write_first_detection_class(df, img_shape=None, edge_threshold=20, column_la
1002
1002
  positions_x = track_group[column_labels['x']].values
1003
1003
  positions_y = track_group[column_labels['y']].values
1004
1004
  dt = 1
1005
-
1005
+
1006
1006
  # Initialize
1007
1007
  cclass = 2; t_first = np.nan;
1008
1008
 
1009
1009
  if np.any(detection==detection):
1010
+
1010
1011
  t_first = timeline[detection==detection][0]
1011
1012
  x_first = positions_x[detection==detection][0]; y_first = positions_y[detection==detection][0];
1012
1013
 
@@ -1015,13 +1016,17 @@ def write_first_detection_class(df, img_shape=None, edge_threshold=20, column_la
1015
1016
  edge_test = (x_first < edge_threshold) or (y_first < edge_threshold) or (y_first > (img_shape[0] - edge_threshold)) or (x_first > (img_shape[1] - edge_threshold))
1016
1017
 
1017
1018
  cclass = 0
1018
- if t_first<=0 or edge_test:
1019
+ if t_first<=0:
1019
1020
  t_first = -1
1020
1021
  cclass = 2
1021
1022
  else:
1022
1023
  t_first = float(t_first) - float(dt)
1023
1024
  if t_first==0:
1024
1025
  t_first += 0.01
1026
+
1027
+ if edge_test:
1028
+ cclass = 2
1029
+ # switch to class 2 but keep time/status information
1025
1030
  else:
1026
1031
  t_first = -1
1027
1032
  cclass = 2
celldetective/utils.py CHANGED
@@ -32,6 +32,19 @@ from cliffs_delta import cliffs_delta
32
32
  from stardist.models import StarDist2D
33
33
  from cellpose.models import CellposeModel
34
34
  from pathlib import PosixPath, PurePosixPath, WindowsPath
35
+ from prettytable import PrettyTable
36
+
37
+
38
+ def is_integer_array(arr):
39
+
40
+ # Mask out NaNs
41
+ non_nan_values = arr[arr==arr].flatten()
42
+ test = np.all(np.mod(non_nan_values, 1) == 0)
43
+
44
+ if test:
45
+ return True
46
+ else:
47
+ return False
35
48
 
36
49
  def get_config(experiment):
37
50
 
@@ -370,7 +383,10 @@ def _prep_stardist_model(model_name, path, use_gpu=False, scale=1):
370
383
  model = StarDist2D(None, name=model_name, basedir=path)
371
384
  model.config.use_gpu = use_gpu
372
385
  model.use_gpu = use_gpu
386
+
373
387
  scale_model = scale
388
+
389
+
374
390
  print(f"StarDist model {model_name} successfully loaded...")
375
391
  return model, scale_model
376
392
 
@@ -424,7 +440,10 @@ def _prep_cellpose_model(model_name, path, use_gpu=False, n_channels=2, scale=No
424
440
  else:
425
441
  scale_model = scale * model.diam_mean / model.diam_labels
426
442
 
427
- print(f"Diam mean: {model.diam_mean}; Diam labels: {model.diam_labels}; Final rescaling: {scale_model}...")
443
+ print(f'Cell size in model: {model.diam_mean} pixels...')
444
+ print(f'Cell size in training set: {model.diam_labels} pixels...')
445
+ print(f"Rescaling factor to apply: {scale_model}...")
446
+
428
447
  print(f'Cellpose model {model_name} successfully loaded...')
429
448
  return model, scale_model
430
449
 
@@ -1899,7 +1918,7 @@ def _extract_channel_indices_from_config(config, channels_to_extract):
1899
1918
  c1 = int(ConfigSectionMap(config,"Channels")[c])
1900
1919
  channels.append(c1)
1901
1920
  except Exception as e:
1902
- print(f"Warning... The channel {c} required by the model is not available in your data...")
1921
+ print(f"Warning: The channel {c} required by the model is not available in your data...")
1903
1922
  channels.append(None)
1904
1923
  if np.all([c is None for c in channels]):
1905
1924
  channels = None
@@ -2864,11 +2883,13 @@ def get_zenodo_files(cat=None):
2864
2883
  categories.append(category)
2865
2884
 
2866
2885
  if cat is not None:
2867
- assert cat in [os.sep.join(['models','segmentation_generic']), os.sep.join(['models','segmentation_targets']), os.sep.join(['models','segmentation_effectors']), \
2868
- 'demos', os.sep.join(['datasets','signal_annotations']), os.sep.join(['datasets','segmentation_annotations']), os.sep.join(['models','signal_detection'])]
2869
- categories = np.array(categories)
2870
- all_files_short = np.array(all_files_short)
2871
- return list(all_files_short[np.where(categories==cat)[0]])
2886
+ if cat in [os.sep.join(['models','segmentation_generic']), os.sep.join(['models','segmentation_targets']), os.sep.join(['models','segmentation_effectors']), \
2887
+ 'demos', os.sep.join(['datasets','signal_annotations']), os.sep.join(['datasets','segmentation_annotations']), os.sep.join(['models','signal_detection'])]:
2888
+ categories = np.array(categories)
2889
+ all_files_short = np.array(all_files_short)
2890
+ return list(all_files_short[np.where(categories==cat)[0]])
2891
+ else:
2892
+ return []
2872
2893
  else:
2873
2894
  return all_files_short,categories
2874
2895
 
@@ -3076,4 +3097,11 @@ def test_2samp_generic(data, feature=None, groupby_cols=None, method="ks_2samp",
3076
3097
  pivot.set_index("cdt1",drop=True, inplace=True)
3077
3098
  pivot.index.name = None
3078
3099
 
3079
- return pivot
3100
+ return pivot
3101
+
3102
+ def pretty_table(dct):
3103
+ table = PrettyTable()
3104
+ for c in dct.keys():
3105
+ table.add_column(str(c), [])
3106
+ table.add_row([dct.get(c, "") for c in dct.keys()])
3107
+ print(table)
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.2
1
+ Metadata-Version: 2.4
2
2
  Name: celldetective
3
- Version: 1.3.9.post5
3
+ Version: 1.4.0
4
4
  Summary: description
5
5
  Home-page: http://github.com/remyeltorro/celldetective
6
6
  Author: Rémy Torro
@@ -44,12 +44,17 @@ Requires-Dist: h5py
44
44
  Requires-Dist: cliffs_delta
45
45
  Requires-Dist: requests
46
46
  Requires-Dist: trackpy
47
+ Requires-Dist: prettyprint
48
+ Requires-Dist: pandas
49
+ Requires-Dist: matplotlib
50
+ Requires-Dist: prettytable
47
51
  Dynamic: author
48
52
  Dynamic: author-email
49
53
  Dynamic: description
50
54
  Dynamic: description-content-type
51
55
  Dynamic: home-page
52
56
  Dynamic: license
57
+ Dynamic: license-file
53
58
  Dynamic: requires-dist
54
59
  Dynamic: summary
55
60
 
@@ -183,26 +188,29 @@ For more information about how to get started, please check the [documentation](
183
188
  # How to cite?
184
189
 
185
190
  If you use this software in your research, please cite the
186
- [Celldetective](https://www.biorxiv.org/content/10.1101/2024.03.15.585250v3)
187
- paper (currently preprint):
191
+ [Celldetective](https://elifesciences.org/reviewed-preprints/105302)
192
+ paper (currently a reviewed preprint at eLife):
188
193
 
189
194
  ``` raw
190
- @article {Torro2024.03.15.585250,
191
- author = {Torro, R{\'e}my and D{\'\i}az-Bello, Beatriz and Arawi, Dalia El and Dervanova, Ksenija and Ammer, Lorna and Dupuy, Florian and Chames, Patrick and Sengupta, Kheya and Limozin, Laurent},
192
- title = {Celldetective: an AI-enhanced image analysis tool for unraveling dynamic cell interactions},
193
- elocation-id = {2024.03.15.585250},
194
- year = {2024},
195
- doi = {10.1101/2024.03.15.585250},
196
- publisher = {Cold Spring Harbor Laboratory},
197
- abstract = {A current challenge in bioimaging for immunology and immunotherapy research lies in analyzing multimodal and multidimensional data that capture dynamic interactions between diverse cell populations. Here, we introduce Celldetective, an open-source Python-based software designed for high-performance, end-to-end analysis of image-based in vitro immune and immunotherapy assays. Purpose-built for multicondition, 2D multichannel time-lapse microscopy of mixed cell populations, Celldetective is optimized for the needs of immunology assays. The software seamlessly integrates AI-based segmentation, Bayesian tracking, and automated single-cell event detection, all within an intuitive graphical interface that supports interactive visualization, annotation, and training capabilities. We demonstrate its utility with original data on immune effector cell interactions with an activating surface, mediated by bispecific antibodies, and further showcase its potential for analyzing extensive sets of pairwise interactions in antibody-dependent cell cytotoxicity events.Competing Interest StatementThe authors have declared no competing interest.},
198
- URL = {https://www.biorxiv.org/content/early/2024/11/13/2024.03.15.585250},
199
- eprint = {https://www.biorxiv.org/content/early/2024/11/13/2024.03.15.585250.full.pdf},
200
- journal = {bioRxiv}
195
+ @article{torroCelldetectiveAIenhancedImage2025,
196
+ title = {Celldetective: An {{AI-enhanced}} Image Analysis Tool for Unraveling Dynamic Cell Interactions},
197
+ shorttitle = {Celldetective},
198
+ author = {Torro, Rémy and Díaz-Bello, Beatriz and Arawi, Dalia El and Dervanova, Ksenija and Ammer, Lorna and Dupuy, Florian and Chames, Patrick and Sengupta, Kheya and Limozin, Laurent},
199
+ date = {2025-03-10},
200
+ journaltitle = {eLife},
201
+ volume = {14},
202
+ publisher = {eLife Sciences Publications Limited},
203
+ doi = {10.7554/eLife.105302.1},
204
+ url = {https://elifesciences.org/reviewed-preprints/105302},
205
+ urldate = {2025-03-20},
206
+ abstract = {A current challenge in bioimaging for immunology and immunotherapy research lies in analyzing multimodal and multidimensional data that capture dynamic interactions between diverse cell populations. Here, we introduce Celldetective, an open-source Python-based software designed for high-performance, end-to-end analysis of image-based in vitro immune and immunotherapy assays. Purpose-built for multicondition, 2D multichannel time-lapse microscopy of mixed cell populations, Celldetective is optimized for the needs of immunology assays. The software seamlessly integrates AI-based segmentation, Bayesian tracking, and automated single-cell event detection, all within an intuitive graphical interface that supports interactive visualization, annotation, and training capabilities. We demonstrate its utility with original data on immune effector cell interactions with an activating surface, mediated by bispecific antibodies, and further showcase its potential for analyzing extensive sets of pairwise interactions in antibody-dependent cell cytotoxicity events.},
207
+ langid = {english},
208
+ file = {/home/torro/Zotero/storage/VFYBBMQF/Torro et al. - 2025 - Celldetective an AI-enhanced image analysis tool .pdf;/home/torro/Zotero/storage/UGMCKKST/105302.html}
201
209
  }
202
210
  ```
203
211
 
204
212
  Make sure you to cite the papers of any segmentation model (StarDist,
205
- Cellpose) or tracker (bTrack) you used through Celldetective.
213
+ Cellpose) or tracker (bTrack, TrackPy) you used through Celldetective.
206
214
 
207
215
  # Bibliography
208
216