celldetective-1.3.9.post5-py3-none-any.whl → celldetective-1.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. celldetective/__init__.py +0 -3
  2. celldetective/_version.py +1 -1
  3. celldetective/events.py +2 -4
  4. celldetective/exceptions.py +11 -0
  5. celldetective/extra_properties.py +132 -0
  6. celldetective/filters.py +7 -1
  7. celldetective/gui/InitWindow.py +37 -46
  8. celldetective/gui/__init__.py +3 -9
  9. celldetective/gui/about.py +19 -15
  10. celldetective/gui/analyze_block.py +34 -19
  11. celldetective/gui/base_annotator.py +786 -0
  12. celldetective/gui/base_components.py +23 -0
  13. celldetective/gui/classifier_widget.py +86 -94
  14. celldetective/gui/configure_new_exp.py +163 -46
  15. celldetective/gui/control_panel.py +76 -146
  16. celldetective/gui/{signal_annotator.py → event_annotator.py} +533 -1438
  17. celldetective/gui/generic_signal_plot.py +11 -13
  18. celldetective/gui/gui_utils.py +54 -23
  19. celldetective/gui/help/neighborhood.json +2 -2
  20. celldetective/gui/json_readers.py +5 -4
  21. celldetective/gui/layouts.py +265 -31
  22. celldetective/gui/{signal_annotator2.py → pair_event_annotator.py} +433 -635
  23. celldetective/gui/plot_measurements.py +21 -17
  24. celldetective/gui/plot_signals_ui.py +125 -72
  25. celldetective/gui/process_block.py +283 -188
  26. celldetective/gui/processes/compute_neighborhood.py +594 -0
  27. celldetective/gui/processes/downloader.py +37 -34
  28. celldetective/gui/processes/measure_cells.py +19 -8
  29. celldetective/gui/processes/segment_cells.py +47 -11
  30. celldetective/gui/processes/track_cells.py +18 -13
  31. celldetective/gui/seg_model_loader.py +21 -62
  32. celldetective/gui/settings/__init__.py +7 -0
  33. celldetective/gui/settings/_settings_base.py +70 -0
  34. celldetective/gui/{retrain_signal_model_options.py → settings/_settings_event_model_training.py} +54 -109
  35. celldetective/gui/{measurement_options.py → settings/_settings_measurements.py} +54 -92
  36. celldetective/gui/{neighborhood_options.py → settings/_settings_neighborhood.py} +10 -13
  37. celldetective/gui/settings/_settings_segmentation.py +49 -0
  38. celldetective/gui/{retrain_segmentation_model_options.py → settings/_settings_segmentation_model_training.py} +38 -92
  39. celldetective/gui/{signal_annotator_options.py → settings/_settings_signal_annotator.py} +78 -103
  40. celldetective/gui/{btrack_options.py → settings/_settings_tracking.py} +85 -116
  41. celldetective/gui/styles.py +2 -1
  42. celldetective/gui/survival_ui.py +49 -95
  43. celldetective/gui/tableUI.py +53 -25
  44. celldetective/gui/table_ops/__init__.py +0 -0
  45. celldetective/gui/table_ops/merge_groups.py +118 -0
  46. celldetective/gui/thresholds_gui.py +617 -1221
  47. celldetective/gui/viewers.py +107 -42
  48. celldetective/gui/workers.py +8 -4
  49. celldetective/io.py +137 -57
  50. celldetective/links/zenodo.json +145 -144
  51. celldetective/measure.py +94 -53
  52. celldetective/neighborhood.py +342 -268
  53. celldetective/preprocessing.py +56 -35
  54. celldetective/regionprops/_regionprops.py +16 -5
  55. celldetective/relative_measurements.py +50 -29
  56. celldetective/scripts/analyze_signals.py +4 -1
  57. celldetective/scripts/measure_cells.py +5 -5
  58. celldetective/scripts/measure_relative.py +20 -12
  59. celldetective/scripts/segment_cells.py +4 -10
  60. celldetective/scripts/segment_cells_thresholds.py +3 -3
  61. celldetective/scripts/track_cells.py +10 -8
  62. celldetective/scripts/train_segmentation_model.py +18 -6
  63. celldetective/signals.py +29 -14
  64. celldetective/tracking.py +14 -3
  65. celldetective/utils.py +91 -62
  66. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/METADATA +24 -16
  67. celldetective-1.4.1.dist-info/RECORD +123 -0
  68. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/WHEEL +1 -1
  69. tests/gui/__init__.py +0 -0
  70. tests/gui/test_new_project.py +228 -0
  71. tests/gui/test_project.py +99 -0
  72. tests/test_preprocessing.py +2 -2
  73. celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +0 -79
  74. celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
  75. celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +0 -37
  76. celldetective/models/segmentation_effectors/test-transfer/config_input.json +0 -39
  77. celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
  78. celldetective/models/signal_detection/NucCond/classification_loss.png +0 -0
  79. celldetective/models/signal_detection/NucCond/classifier.h5 +0 -0
  80. celldetective/models/signal_detection/NucCond/config_input.json +0 -1
  81. celldetective/models/signal_detection/NucCond/log_classifier.csv +0 -126
  82. celldetective/models/signal_detection/NucCond/log_regressor.csv +0 -282
  83. celldetective/models/signal_detection/NucCond/regression_loss.png +0 -0
  84. celldetective/models/signal_detection/NucCond/regressor.h5 +0 -0
  85. celldetective/models/signal_detection/NucCond/scores.npy +0 -0
  86. celldetective/models/signal_detection/NucCond/test_confusion_matrix.png +0 -0
  87. celldetective/models/signal_detection/NucCond/test_regression.png +0 -0
  88. celldetective/models/signal_detection/NucCond/validation_confusion_matrix.png +0 -0
  89. celldetective/models/signal_detection/NucCond/validation_regression.png +0 -0
  90. celldetective-1.3.9.post5.dist-info/RECORD +0 -129
  91. tests/test_qt.py +0 -103
  92. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/entry_points.txt +0 -0
  93. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info/licenses}/LICENSE +0 -0
  94. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/top_level.txt +0 -0
celldetective/signals.py CHANGED
@@ -156,6 +156,10 @@ def analyze_signals(trajectories, model, interpolate_na=True,
      f = open(model_config_path)
      config = json.load(f)
      required_signals = config["channels"]
+     if 'selected_channels' in config:
+         selected_signals = config['selected_channels']
+         if np.any([s=='None' for s in selected_signals]):
+             trajectories['None'] = 0.
      model_signal_length = config['model_signal_length']
 
      try:
@@ -264,6 +268,8 @@ def analyze_signals(trajectories, model, interpolate_na=True,
      plt.pause(3)
      plt.close()
 
+     if "None" in list(trajectories.columns):
+         trajectories = trajectories.drop(columns=['None'])
      return trajectories
 
  def analyze_signals_at_position(pos, model, mode, use_gpu=True, return_table=False):
@@ -324,7 +330,7 @@ def analyze_signals_at_position(pos, model, mode, use_gpu=True, return_table=Fal
      else:
          return None
 
- def analyze_pair_signals_at_position(pos, model, use_gpu=True):
+ def analyze_pair_signals_at_position(pos, model, use_gpu=True, populations=['targets','effectors']):
 
 
      pos = pos.replace('\\','/')
@@ -332,13 +338,10 @@ def analyze_pair_signals_at_position(pos, model, use_gpu=True):
      assert os.path.exists(pos),f'Position {pos} is not a valid path.'
      if not pos.endswith('/'):
          pos += '/'
-
-     df_targets = get_position_pickle(pos, population='targets')
-     df_effectors = get_position_pickle(pos, population='effectors')
-     dataframes = {
-         'targets': df_targets,
-         'effectors': df_effectors,
-     }
+
+     dataframes = {}
+     for pop in populations:
+         dataframes.update({pop: get_position_pickle(pos, population=pop)})
      df_pairs = get_position_table(pos, population='pairs')
 
      # Need to identify expected reference / neighbor tables
@@ -354,12 +357,19 @@ def analyze_pair_signals_at_position(pos, model, use_gpu=True):
      reference_population = model_config_path['reference_population']
      neighbor_population = model_config_path['neighbor_population']
 
+     if dataframes[reference_population] is None:
+         print(f"No tabulated data can be found for the reference population ({reference_population})... Abort...")
+         return None
+
+     if dataframes[neighbor_population] is None:
+         print(f"No tabulated data can be found for the neighbor population ({neighbor_population})... Abort...")
+         return None
+
      df = analyze_pair_signals(df_pairs, dataframes[reference_population], dataframes[neighbor_population], model=model)
-
      table = pos + os.sep.join(["output","tables",f"trajectories_pairs.csv"])
      df.to_csv(table, index=False)
 
-     return None
+     return None
 
 
  def analyze_pair_signals(trajectories_pairs,trajectories_reference,trajectories_neighbors, model, interpolate_na=True, selected_signals=None,
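For reference, a minimal usage sketch of the updated pair-signal entry point shown above. The position path and model name are placeholders, not values shipped with this release:

```python
# Hypothetical call illustrating the new `populations` keyword of
# analyze_pair_signals_at_position (celldetective must be installed).
from celldetective.signals import analyze_pair_signals_at_position

pos = "/path/to/experiment/W1/100"  # placeholder position folder
analyze_pair_signals_at_position(pos, "my_pair_event_model", use_gpu=True,
                                 populations=['targets', 'effectors'])
```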
@@ -2823,7 +2833,7 @@ def columnwise_mean(matrix, min_nbr_values = 1, projection='mean'):
      return mean_line, mean_line_std
 
 
- def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], return_matrix=False, forced_max_duration=None, min_nbr_values=2,conflict_mode='mean', projection='mean'):
+ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], return_matrix=False, forced_max_duration=None, min_nbr_values=2,conflict_mode='mean', projection='mean',pairs=False):
 
      """
      Calculate the mean and standard deviation of a specified signal for tracks of a given class in the input DataFrame.
@@ -2878,14 +2888,19 @@ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], retu
      if isinstance(time_col, (int,float)):
          abs_time = True
 
-     n_tracks = len(df.groupby(['position','TRACK_ID']))
+     if not pairs:
+         groupby_cols = ['position','TRACK_ID']
+     else:
+         groupby_cols = ['position','REFERENCE_ID','NEIGHBOR_ID']
+
+     n_tracks = len(df.groupby(groupby_cols))
      signal_matrix = np.zeros((n_tracks,int(max_duration)*2 + 1))
      signal_matrix[:,:] = np.nan
 
-     df = df.sort_values(by=['position','TRACK_ID','FRAME'])
+     df = df.sort_values(by=groupby_cols+['FRAME'])
 
      trackid=0
-     for track,track_group in df.loc[df[class_col].isin(class_value)].groupby(['position','TRACK_ID']):
+     for track,track_group in df.loc[df[class_col].isin(class_value)].groupby(groupby_cols):
          cclass = track_group[class_col].to_numpy()[0]
          if cclass != 0:
              ref_time = 0
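To make the new grouping switch concrete, here is a standalone pandas sketch (toy data, not from the package) of how `pairs=True` changes the trajectory key:

```python
import pandas as pd

# Toy illustration: single-cell tracks are keyed by (position, TRACK_ID),
# cell pairs by (position, REFERENCE_ID, NEIGHBOR_ID).
pairs = True
groupby_cols = ['position', 'REFERENCE_ID', 'NEIGHBOR_ID'] if pairs else ['position', 'TRACK_ID']

df = pd.DataFrame({
    'position': ['W1/100'] * 4,
    'REFERENCE_ID': [0, 0, 1, 1],
    'NEIGHBOR_ID': [5, 5, 7, 7],
    'FRAME': [0, 1, 0, 1],
    'signal': [1.0, 2.0, 3.0, 4.0],
})
n_tracks = len(df.groupby(groupby_cols))
print(n_tracks)  # 2 pair trajectories
```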
celldetective/tracking.py CHANGED
@@ -189,7 +189,8 @@ def track(labels, configuration=None, stack=None, spatial_calibration=1, feature
 
      if clean_trajectories_kwargs is not None:
          df = clean_trajectories(df.copy(),**clean_trajectories_kwargs)
-
+
+     df.loc[df["status_firstdetection"].isna(), "status_firstdetection"] = 0
      df['ID'] = np.arange(len(df)).astype(int)
 
      invalid_cols = [c for c in list(df.columns) if c.startswith('Unnamed')]
@@ -1003,10 +1004,14 @@ def write_first_detection_class(df, img_shape=None, edge_threshold=20, column_la
          positions_y = track_group[column_labels['y']].values
          dt = 1
 
+         timeline = track_group['FRAME'].to_numpy()
+         status = np.ones_like(timeline)
+
          # Initialize
          cclass = 2; t_first = np.nan;
 
          if np.any(detection==detection):
+
              t_first = timeline[detection==detection][0]
              x_first = positions_x[detection==detection][0]; y_first = positions_y[detection==detection][0];
 
@@ -1015,19 +1020,25 @@
              edge_test = (x_first < edge_threshold) or (y_first < edge_threshold) or (y_first > (img_shape[0] - edge_threshold)) or (x_first > (img_shape[1] - edge_threshold))
 
              cclass = 0
-             if t_first<=0 or edge_test:
+             if t_first<=0:
                  t_first = -1
                  cclass = 2
              else:
                  t_first = float(t_first) - float(dt)
                  if t_first==0:
                      t_first += 0.01
+
+                 if edge_test:
+                     cclass = 2
+                     # switch to class 2 but keep time/status information
          else:
              t_first = -1
              cclass = 2
-
+
+         status[timeline < t_first] = 0.
          df.loc[indices, 'class_firstdetection'] = cclass
          df.loc[indices, 't_firstdetection'] = t_first
+         df.loc[indices, 'status_firstdetection'] = status
 
      return df
 
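As a toy illustration of the new `status_firstdetection` column (standalone numpy, not the package function): frames before the first detection are flagged 0, later frames keep status 1.

```python
import numpy as np

# Toy version of the status logic added in write_first_detection_class.
timeline = np.array([0, 1, 2, 3, 4, 5])
t_first = 2.0                      # first-detection time of this track
status = np.ones_like(timeline, dtype=float)
status[timeline < t_first] = 0.
print(status)                      # [0. 0. 1. 1. 1. 1.]
```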
celldetective/utils.py CHANGED
@@ -1,8 +1,6 @@
-
  import numpy as np
  import pandas as pd
  import matplotlib.pyplot as plt
- import re
  import os
  from scipy.ndimage import shift, zoom
  os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
@@ -31,9 +29,22 @@ from scipy.stats import ks_2samp
  from cliffs_delta import cliffs_delta
  from stardist.models import StarDist2D
  from cellpose.models import CellposeModel
- from pathlib import PosixPath, PurePosixPath, WindowsPath
+ from pathlib import PosixPath, PurePath, PurePosixPath, WindowsPath, Path
+ from prettytable import PrettyTable
+ from typing import List, Dict, Union, Optional
+
+ def is_integer_array(arr: np.ndarray) -> bool:
+
+     # Mask out NaNs
+     non_nan_values = arr[arr==arr].flatten()
+     test = np.all(np.mod(non_nan_values, 1) == 0)
+
+     if test:
+         return True
+     else:
+         return False
 
- def get_config(experiment):
+ def get_config(experiment: Union[str,Path]) -> str:
 
      """
      Retrieves the path to the configuration file for a given experiment.
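A quick sanity check of the new `is_integer_array` helper introduced in the hunk above (toy arrays; assumes celldetective is installed so the helper can be imported):

```python
import numpy as np
from celldetective.utils import is_integer_array

# NaNs are masked out before testing whether the remaining values are whole numbers.
print(is_integer_array(np.array([1.0, 2.0, np.nan])))  # True
print(is_integer_array(np.array([1.5, 2.0])))          # False
```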
@@ -81,7 +92,7 @@ def get_config(experiment):
      return config
 
 
- def _remove_invalid_cols(df):
+ def _remove_invalid_cols(df: pd.DataFrame) -> pd.DataFrame:
 
      """
      Removes invalid columns from a DataFrame.
@@ -107,7 +118,7 @@ def _remove_invalid_cols(df):
      df = df.dropna(axis=1, how='all')
      return df
 
- def _extract_coordinates_from_features(df, timepoint):
+ def _extract_coordinates_from_features(df: pd.DataFrame, timepoint: int) -> pd.DataFrame:
 
      """
      Re-format coordinates from a regionprops table to tracking/measurement table format.
@@ -148,7 +159,7 @@ def _extract_coordinates_from_features(df, timepoint):
 
      return coords
 
- def _mask_intensity_measurements(df, mask_channels):
+ def _mask_intensity_measurements(df: pd.DataFrame, mask_channels: Optional[List[str]]):
 
      """
      Removes columns from a DataFrame that match specific channel name patterns.
@@ -197,7 +208,7 @@ def _mask_intensity_measurements(df, mask_channels):
      df = df.drop(cols_to_drop, axis=1)
      return df
 
- def _rearrange_multichannel_frame(frame, n_channels=None):
+ def _rearrange_multichannel_frame(frame: np.ndarray, n_channels: Optional[int] = None) -> np.ndarray:
 
      """
      Rearranges the axes of a multi-channel frame to ensure the channel axis is at the end.
@@ -258,7 +269,7 @@ def _rearrange_multichannel_frame(frame, n_channels=None):
 
      return frame
 
- def _fix_no_contrast(frames, value=1):
+ def _fix_no_contrast(frames: np.ndarray, value: Union[float,int] = 1):
 
      """
      Ensures that frames with no contrast (i.e., containing only a single unique value) are adjusted.
@@ -297,7 +308,7 @@ def _fix_no_contrast(frames, value=1):
          frames[0,0,k] += value
      return frames
 
- def zoom_multiframes(frames, zoom_factor):
+ def zoom_multiframes(frames: np.ndarray, zoom_factor: float) -> np.ndarray:
 
      """
      Applies zooming to each frame (channel) in a multi-frame image.
@@ -370,7 +381,10 @@ def _prep_stardist_model(model_name, path, use_gpu=False, scale=1):
      model = StarDist2D(None, name=model_name, basedir=path)
      model.config.use_gpu = use_gpu
      model.use_gpu = use_gpu
+
      scale_model = scale
+
+
      print(f"StarDist model {model_name} successfully loaded...")
      return model, scale_model
 
@@ -424,7 +438,10 @@ def _prep_cellpose_model(model_name, path, use_gpu=False, n_channels=2, scale=No
      else:
          scale_model = scale * model.diam_mean / model.diam_labels
 
-     print(f"Diam mean: {model.diam_mean}; Diam labels: {model.diam_labels}; Final rescaling: {scale_model}...")
+     print(f'Cell size in model: {model.diam_mean} pixels...')
+     print(f'Cell size in training set: {model.diam_labels} pixels...')
+     print(f"Rescaling factor to apply: {scale_model}...")
+
      print(f'Cellpose model {model_name} successfully loaded...')
      return model, scale_model
 
@@ -1134,27 +1151,24 @@ def demangle_column_name(name):
 
  def extract_cols_from_query(query: str):
 
-     # Track variables in a dictionary to be used as a dictionary of globals. From: https://stackoverflow.com/questions/64576913/extract-pandas-dataframe-column-names-from-query-string
-
-     variables = {}
+     backtick_pattern = r'`([^`]+)`'
+     backticked = set(re.findall(backtick_pattern, query))
 
-     while True:
-         try:
-             # Try creating a Expr object with the query string and dictionary of globals.
-             # This will raise an error as long as the dictionary of globals is incomplete.
-             env = pd.core.computation.scope.ensure_scope(level=0, global_dict=variables)
-             pd.core.computation.eval.Expr(query, env=env)
+     # 2. Remove backtick sections so they don't get double-counted
+     cleaned_query = re.sub(backtick_pattern, "", query)
 
-             # Exit the loop when evaluation is successful.
-             break
-         except pd.errors.UndefinedVariableError as e:
-             # This relies on the format defined here: https://github.com/pandas-dev/pandas/blob/965ceca9fd796940050d6fc817707bba1c4f9bff/pandas/errors/__init__.py#L401
-             name = re.findall("name '(.+?)' is not defined", str(e))[0]
+     # 3. Extract bare identifiers from the remaining string
+     identifier_pattern = r'\b([A-Za-z_]\w*)\b'
+     bare = set(re.findall(identifier_pattern, cleaned_query))
 
-             # Add the name to the globals dictionary with a dummy value.
-             variables[name] = None
+     # 4. Remove Python keywords, operators, and pandas builtins
+     blacklist = set(dir(pd)) | set(dir(__builtins__)) | {
+         "and", "or", "not", "in", "True", "False"
+     }
+     bare = {c for c in bare if c not in blacklist}
+     cols = backticked | bare
 
-     return [demangle_column_name(name) for name in variables.keys()]
+     return list([demangle_column_name(c) for c in cols])
 
  def create_patch_mask(h, w, center=None, radius=None):
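For intuition, a standalone sketch of the regex strategy used by the rewritten `extract_cols_from_query` (simplified: the real helper also demangles names and builds its blacklist from `dir(pd)`; the query string below is a toy):

```python
import re

query = "`area (um)` > 50 and intensity_mean < 0.2"   # toy query string

backtick_pattern = r'`([^`]+)`'
backticked = set(re.findall(backtick_pattern, query))           # {'area (um)'}
cleaned_query = re.sub(backtick_pattern, "", query)             # drop backticked parts
bare = set(re.findall(r'\b([A-Za-z_]\w*)\b', cleaned_query))    # {'and', 'intensity_mean'}
bare -= {"and", "or", "not", "in", "True", "False"}             # strip query keywords
print(backticked | bare)  # {'area (um)', 'intensity_mean'} (order may vary)
```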
@@ -1261,7 +1275,7 @@ def rename_intensity_column(df, channels):
          sections = np.array(re.split('-|_', intensity_cols[k]))
          test_digit = np.array([False for s in sections])
          for j,s in enumerate(sections):
-             if s.isdigit():
+             if str(s).isdigit():
                  if int(s)<len(channel_names):
                      test_digit[j] = True
 
@@ -1794,8 +1808,8 @@ def _extract_channel_indices(channels, required_channels):
 
      return channel_indices
 
- def ConfigSectionMap(path,section):
-
+ def config_section_to_dict(path: Union[str,PurePath,Path], section: str) -> Union[Dict,None]:
+
      """
      Parse the config file to extract experiment parameters
      following https://wiki.python.org/moin/ConfigParserExamples
@@ -1818,7 +1832,7 @@ def ConfigSectionMap(path,section):
      --------
      >>> config = "path/to/config_file.ini"
      >>> section = "Channels"
-     >>> channel_dictionary = ConfigSectionMap(config,section)
+     >>> channel_dictionary = config_section_to_dict(config,section)
      >>> print(channel_dictionary)
      # {'brightfield_channel': '0',
      #  'live_nuclei_channel': 'nan',
@@ -1842,7 +1856,7 @@ def ConfigSectionMap(path,section):
          try:
              dict1[option] = Config.get(section, option)
              if dict1[option] == -1:
-                 DebugPrint("skip: %s" % option)
+                 print("skip: %s" % option)
          except:
              print("exception on %s!" % option)
              dict1[option] = None
@@ -1896,10 +1910,10 @@ def _extract_channel_indices_from_config(config, channels_to_extract):
      channels = []
      for c in channels_to_extract:
          try:
-             c1 = int(ConfigSectionMap(config,"Channels")[c])
+             c1 = int(config_section_to_dict(config, "Channels")[c])
              channels.append(c1)
          except Exception as e:
-             print(f"Warning... The channel {c} required by the model is not available in your data...")
+             print(f"Warning: The channel {c} required by the model is not available in your data...")
              channels.append(None)
      if np.all([c is None for c in channels]):
          channels = None
@@ -1922,10 +1936,10 @@ def _extract_nbr_channels_from_config(config, return_names=False):
      nbr_channels = 0
      channels = []
      try:
-         fields = ConfigSectionMap(config,"Channels")
+         fields = config_section_to_dict(config, "Channels")
          for c in fields:
              try:
-                 channel = int(ConfigSectionMap(config, "Channels")[c])
+                 channel = int(config_section_to_dict(config, "Channels")[c])
                  nbr_channels += 1
                  channels.append(c)
              except:
@@ -1939,49 +1953,49 @@
          nbr_channels = 0
          channels = []
          try:
-             brightfield_channel = int(ConfigSectionMap(config,"MovieSettings")["brightfield_channel"])
+             brightfield_channel = int(config_section_to_dict(config, "MovieSettings")["brightfield_channel"])
              nbr_channels += 1
              channels.append('brightfield_channel')
          except:
              brightfield_channel = None
 
          try:
-             live_nuclei_channel = int(ConfigSectionMap(config,"MovieSettings")["live_nuclei_channel"])
+             live_nuclei_channel = int(config_section_to_dict(config, "MovieSettings")["live_nuclei_channel"])
              nbr_channels += 1
              channels.append('live_nuclei_channel')
          except:
              live_nuclei_channel = None
 
          try:
-             dead_nuclei_channel = int(ConfigSectionMap(config,"MovieSettings")["dead_nuclei_channel"])
+             dead_nuclei_channel = int(config_section_to_dict(config, "MovieSettings")["dead_nuclei_channel"])
              nbr_channels +=1
              channels.append('dead_nuclei_channel')
          except:
              dead_nuclei_channel = None
 
          try:
-             effector_fluo_channel = int(ConfigSectionMap(config,"MovieSettings")["effector_fluo_channel"])
+             effector_fluo_channel = int(config_section_to_dict(config, "MovieSettings")["effector_fluo_channel"])
              nbr_channels +=1
              channels.append('effector_fluo_channel')
          except:
              effector_fluo_channel = None
 
          try:
-             adhesion_channel = int(ConfigSectionMap(config,"MovieSettings")["adhesion_channel"])
+             adhesion_channel = int(config_section_to_dict(config, "MovieSettings")["adhesion_channel"])
              nbr_channels += 1
              channels.append('adhesion_channel')
          except:
              adhesion_channel = None
 
          try:
-             fluo_channel_1 = int(ConfigSectionMap(config,"MovieSettings")["fluo_channel_1"])
+             fluo_channel_1 = int(config_section_to_dict(config, "MovieSettings")["fluo_channel_1"])
              nbr_channels += 1
              channels.append('fluo_channel_1')
          except:
              fluo_channel_1 = None
 
          try:
-             fluo_channel_2 = int(ConfigSectionMap(config,"MovieSettings")["fluo_channel_2"])
+             fluo_channel_2 = int(config_section_to_dict(config, "MovieSettings")["fluo_channel_2"])
              nbr_channels += 1
              channels.append('fluo_channel_2')
          except:
@@ -2087,10 +2101,10 @@ def _extract_labels_from_config(config,number_of_wells):
 
 
      try:
-         concentrations = ConfigSectionMap(config,"Labels")["concentrations"].split(",")
-         cell_types = ConfigSectionMap(config,"Labels")["cell_types"].split(",")
-         antibodies = ConfigSectionMap(config,"Labels")["antibodies"].split(",")
-         pharmaceutical_agents = ConfigSectionMap(config,"Labels")["pharmaceutical_agents"].split(",")
+         concentrations = config_section_to_dict(config, "Labels")["concentrations"].split(",")
+         cell_types = config_section_to_dict(config, "Labels")["cell_types"].split(",")
+         antibodies = config_section_to_dict(config, "Labels")["antibodies"].split(",")
+         pharmaceutical_agents = config_section_to_dict(config, "Labels")["pharmaceutical_agents"].split(",")
          index = np.arange(len(concentrations)).astype(int) + 1
          if not np.all(pharmaceutical_agents=="None"):
              labels = [f"W{idx}: [CT] "+a+"; [Ab] "+b+" @ "+c+" pM "+d for idx,a,b,c,d in zip(index,cell_types,antibodies,concentrations,pharmaceutical_agents)]
@@ -2136,10 +2150,10 @@ def _extract_channels_from_config(config):
      channel_names = []
      channel_indices = []
      try:
-         fields = ConfigSectionMap(config,"Channels")
+         fields = config_section_to_dict(config, "Channels")
          for c in fields:
              try:
-                 idx = int(ConfigSectionMap(config, "Channels")[c])
+                 idx = int(config_section_to_dict(config, "Channels")[c])
                  channel_names.append(c)
                  channel_indices.append(idx)
              except:
@@ -2188,7 +2202,7 @@ def extract_experiment_channels(experiment):
      return _extract_channels_from_config(config)
 
 
- def get_software_location():
+ def get_software_location() -> str:
 
      """
      Get the installation folder of celldetective.
@@ -2241,7 +2255,7 @@ def remove_trajectory_measurements(trajectories, column_labels={'track': "TRACK_
      tracks = trajectories.copy()
 
      columns_to_keep = [column_labels['track'], column_labels['time'], column_labels['x'], column_labels['y'],column_labels['x']+'_um', column_labels['y']+'_um', 'class_id',
-                        't', 'state', 'generation', 'root', 'parent', 'ID', 't0', 'class', 'status', 'class_color', 'status_color', 'class_firstdetection', 't_firstdetection', 'velocity']
+                        't', 'state', 'generation', 'root', 'parent', 'ID', 't0', 'class', 'status', 'class_color', 'status_color', 'class_firstdetection', 't_firstdetection', 'status_firstdetection','velocity']
      cols = list(tracks.columns)
      for c in columns_to_keep:
          if c not in cols:
@@ -2833,8 +2847,7 @@ def download_url_to_file(url, dst, progress=True):
          shutil.move(f.name, dst)
      finally:
          f.close()
-         if os.path.exists(f.name):
-             os.remove(f.name)
+         remove_file_if_exists(f.name)
 
  def get_zenodo_files(cat=None):
 
@@ -2864,11 +2877,13 @@ def get_zenodo_files(cat=None):
          categories.append(category)
 
      if cat is not None:
-         assert cat in [os.sep.join(['models','segmentation_generic']), os.sep.join(['models','segmentation_targets']), os.sep.join(['models','segmentation_effectors']), \
-             'demos', os.sep.join(['datasets','signal_annotations']), os.sep.join(['datasets','segmentation_annotations']), os.sep.join(['models','signal_detection'])]
-         categories = np.array(categories)
-         all_files_short = np.array(all_files_short)
-         return list(all_files_short[np.where(categories==cat)[0]])
+         if cat in [os.sep.join(['models','segmentation_generic']), os.sep.join(['models','segmentation_targets']), os.sep.join(['models','segmentation_effectors']), \
+             'demos', os.sep.join(['datasets','signal_annotations']), os.sep.join(['datasets','segmentation_annotations']), os.sep.join(['models','signal_detection'])]:
+             categories = np.array(categories)
+             all_files_short = np.array(all_files_short)
+             return list(all_files_short[np.where(categories==cat)[0]])
+         else:
+             return []
      else:
          return all_files_short,categories
 
@@ -2964,7 +2979,7 @@ def collapse_trajectories_by_status(df, status=None, projection='mean', populati
 
      return group_table
 
- def step_function(t, t_shift, dt):
+ def step_function(t: Union[np.ndarray,List], t_shift: float, dt: float) -> np.ndarray:
 
      """
      Computes a step function using the logistic sigmoid function.
@@ -3006,7 +3021,7 @@ def step_function(t, t_shift, dt):
      return 1/(1+np.exp(-(t-t_shift)/dt))
 
 
- def test_2samp_generic(data, feature=None, groupby_cols=None, method="ks_2samp", *args, **kwargs):
+ def test_2samp_generic(data: pd.DataFrame, feature: Optional[str] = None, groupby_cols: Optional[Union[str,List[str]]] = None, method="ks_2samp", *args, **kwargs) -> pd.DataFrame:
 
      """
      Performs pairwise statistical tests between groups of data, comparing a specified feature using a chosen method.
@@ -3076,4 +3091,18 @@ def test_2samp_generic(data, feature=None, groupby_cols=None, method="ks_2samp",
      pivot.set_index("cdt1",drop=True, inplace=True)
      pivot.index.name = None
 
-     return pivot
+     return pivot
+
+ def pretty_table(dct: dict):
+     table = PrettyTable()
+     for c in dct.keys():
+         table.add_column(str(c), [])
+     table.add_row([dct.get(c, "") for c in dct.keys()])
+     print(table)
+
+ def remove_file_if_exists(file: Union[str,Path]):
+     if os.path.exists(file):
+         try:
+             os.remove(file)
+         except Exception as e:
+             print(e)
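A hedged usage sketch of the renamed and newly added helpers above; the config path is a placeholder and assumes celldetective is installed:

```python
from celldetective.utils import config_section_to_dict, pretty_table

config = "/path/to/experiment/config.ini"             # placeholder experiment config
channels = config_section_to_dict(config, "Channels")  # formerly ConfigSectionMap
if channels is not None:
    pretty_table(channels)  # one column per option, one row with the values
```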
{celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: celldetective
- Version: 1.3.9.post5
+ Version: 1.4.1
  Summary: description
  Home-page: http://github.com/remyeltorro/celldetective
  Author: Rémy Torro
@@ -44,12 +44,17 @@ Requires-Dist: h5py
  Requires-Dist: cliffs_delta
  Requires-Dist: requests
  Requires-Dist: trackpy
+ Requires-Dist: prettyprint
+ Requires-Dist: pandas
+ Requires-Dist: matplotlib
+ Requires-Dist: prettytable
  Dynamic: author
  Dynamic: author-email
  Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: requires-dist
  Dynamic: summary
 
@@ -183,26 +188,29 @@ For more information about how to get started, please check the [documentation](
  # How to cite?
 
  If you use this software in your research, please cite the
- [Celldetective](https://www.biorxiv.org/content/10.1101/2024.03.15.585250v3)
- paper (currently preprint):
+ [Celldetective](https://elifesciences.org/reviewed-preprints/105302)
+ paper (currently a reviewed preprint at eLife):
 
  ``` raw
- @article {Torro2024.03.15.585250,
-   author = {Torro, R{\'e}my and D{\'\i}az-Bello, Beatriz and Arawi, Dalia El and Dervanova, Ksenija and Ammer, Lorna and Dupuy, Florian and Chames, Patrick and Sengupta, Kheya and Limozin, Laurent},
-   title = {Celldetective: an AI-enhanced image analysis tool for unraveling dynamic cell interactions},
-   elocation-id = {2024.03.15.585250},
-   year = {2024},
-   doi = {10.1101/2024.03.15.585250},
-   publisher = {Cold Spring Harbor Laboratory},
-   abstract = {A current challenge in bioimaging for immunology and immunotherapy research lies in analyzing multimodal and multidimensional data that capture dynamic interactions between diverse cell populations. Here, we introduce Celldetective, an open-source Python-based software designed for high-performance, end-to-end analysis of image-based in vitro immune and immunotherapy assays. Purpose-built for multicondition, 2D multichannel time-lapse microscopy of mixed cell populations, Celldetective is optimized for the needs of immunology assays. The software seamlessly integrates AI-based segmentation, Bayesian tracking, and automated single-cell event detection, all within an intuitive graphical interface that supports interactive visualization, annotation, and training capabilities. We demonstrate its utility with original data on immune effector cell interactions with an activating surface, mediated by bispecific antibodies, and further showcase its potential for analyzing extensive sets of pairwise interactions in antibody-dependent cell cytotoxicity events.Competing Interest StatementThe authors have declared no competing interest.},
-   URL = {https://www.biorxiv.org/content/early/2024/11/13/2024.03.15.585250},
-   eprint = {https://www.biorxiv.org/content/early/2024/11/13/2024.03.15.585250.full.pdf},
-   journal = {bioRxiv}
+ @article{torroCelldetectiveAIenhancedImage2025,
+   title = {Celldetective: An {{AI-enhanced}} Image Analysis Tool for Unraveling Dynamic Cell Interactions},
+   shorttitle = {Celldetective},
+   author = {Torro, Rémy and Díaz-Bello, Beatriz and Arawi, Dalia El and Dervanova, Ksenija and Ammer, Lorna and Dupuy, Florian and Chames, Patrick and Sengupta, Kheya and Limozin, Laurent},
+   date = {2025-03-10},
+   journaltitle = {eLife},
+   volume = {14},
+   publisher = {eLife Sciences Publications Limited},
+   doi = {10.7554/eLife.105302.1},
+   url = {https://elifesciences.org/reviewed-preprints/105302},
+   urldate = {2025-03-20},
+   abstract = {A current challenge in bioimaging for immunology and immunotherapy research lies in analyzing multimodal and multidimensional data that capture dynamic interactions between diverse cell populations. Here, we introduce Celldetective, an open-source Python-based software designed for high-performance, end-to-end analysis of image-based in vitro immune and immunotherapy assays. Purpose-built for multicondition, 2D multichannel time-lapse microscopy of mixed cell populations, Celldetective is optimized for the needs of immunology assays. The software seamlessly integrates AI-based segmentation, Bayesian tracking, and automated single-cell event detection, all within an intuitive graphical interface that supports interactive visualization, annotation, and training capabilities. We demonstrate its utility with original data on immune effector cell interactions with an activating surface, mediated by bispecific antibodies, and further showcase its potential for analyzing extensive sets of pairwise interactions in antibody-dependent cell cytotoxicity events.},
+   langid = {english},
+   file = {/home/torro/Zotero/storage/VFYBBMQF/Torro et al. - 2025 - Celldetective an AI-enhanced image analysis tool .pdf;/home/torro/Zotero/storage/UGMCKKST/105302.html}
  }
  ```
 
  Make sure you to cite the papers of any segmentation model (StarDist,
- Cellpose) or tracker (bTrack) you used through Celldetective.
+ Cellpose) or tracker (bTrack, TrackPy) you used through Celldetective.
 
  # Bibliography