celldetective 1.3.4.post1__py3-none-any.whl → 1.3.6.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. celldetective/_version.py +1 -1
  2. celldetective/events.py +10 -5
  3. celldetective/filters.py +11 -0
  4. celldetective/gui/btrack_options.py +151 -1
  5. celldetective/gui/classifier_widget.py +44 -15
  6. celldetective/gui/configure_new_exp.py +13 -0
  7. celldetective/gui/control_panel.py +4 -2
  8. celldetective/gui/generic_signal_plot.py +2 -6
  9. celldetective/gui/gui_utils.py +170 -12
  10. celldetective/gui/measurement_options.py +85 -54
  11. celldetective/gui/neighborhood_options.py +1 -1
  12. celldetective/gui/plot_signals_ui.py +3 -4
  13. celldetective/gui/process_block.py +8 -6
  14. celldetective/gui/signal_annotator.py +10 -3
  15. celldetective/gui/signal_annotator2.py +146 -193
  16. celldetective/gui/survival_ui.py +121 -34
  17. celldetective/gui/tableUI.py +26 -12
  18. celldetective/gui/thresholds_gui.py +9 -52
  19. celldetective/gui/viewers.py +58 -21
  20. celldetective/io.py +1087 -161
  21. celldetective/measure.py +175 -102
  22. celldetective/preprocessing.py +2 -2
  23. celldetective/relative_measurements.py +6 -9
  24. celldetective/scripts/measure_cells.py +13 -3
  25. celldetective/scripts/segment_cells.py +0 -1
  26. celldetective/scripts/track_cells.py +25 -1
  27. celldetective/signals.py +9 -7
  28. celldetective/tracking.py +130 -81
  29. celldetective/utils.py +28 -7
  30. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/METADATA +3 -2
  31. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/RECORD +35 -35
  32. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/LICENSE +0 -0
  33. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/WHEEL +0 -0
  34. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/entry_points.txt +0 -0
  35. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/top_level.txt +0 -0
celldetective/measure.py CHANGED
@@ -24,6 +24,9 @@ import celldetective.extra_properties as extra_properties
 from celldetective.extra_properties import *
 from inspect import getmembers, isfunction
 from skimage.morphology import disk
+from scipy.signal import find_peaks, peak_widths
+
+from celldetective.segmentation import filter_image

 abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])

@@ -193,6 +196,12 @@ def measure(stack=None, labels=None, trajectories=None, channel_names=None,
     elif do_features*(trajectories is None):
         measurements_at_t = positions_at_t

+    try:
+        measurements_at_t['radial_distance'] = np.sqrt((measurements_at_t[column_labels['x']] - img.shape[0] / 2) ** 2 + (
+            measurements_at_t[column_labels['y']] - img.shape[1] / 2) ** 2)
+    except Exception as e:
+        print(f"{e=}")
+
     timestep_dataframes.append(measurements_at_t)

     measurements = pd.concat(timestep_dataframes)
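For orientation, this is what the new `radial_distance` column computes: the distance of each cell to the image centre. A minimal self-contained sketch with hypothetical positions and a square 512x512 frame (note the released code pairs `x` with `img.shape[0]` and `y` with `img.shape[1]`, which is only interchangeable for square images):

import numpy as np
import pandas as pd

# Hypothetical positions table and frame size
df = pd.DataFrame({'POSITION_X': [256.0, 10.0], 'POSITION_Y': [256.0, 20.0]})
h, w = 512, 512  # img.shape[0], img.shape[1]

# Same formula as the hunk above: Euclidean distance to the image centre
df['radial_distance'] = np.sqrt((df['POSITION_X'] - h / 2) ** 2
                                + (df['POSITION_Y'] - w / 2) ** 2)
print(df['radial_distance'].round(1).tolist())  # [0.0, 340.9]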
@@ -300,19 +309,28 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N

     """

+    if isinstance(features, list):
+        features = features.copy()
+
     if features is None:
         features = []

-    # Add label to have identity of mask
-    if 'label' not in features:
-        features.append('label')
-
+    measure_mean_intensities = False
     if img is None:
         if verbose:
             print('No image was provided... Skip intensity measurements.')
         border_dist = None;
         haralick_options = None;
         features = drop_tonal_features(features)
+
+    if 'intensity_mean' in features:
+        measure_mean_intensities = True
+        features.remove('intensity_mean')
+
+    # Add label to have identity of mask
+    if 'label' not in features:
+        features.append('label')
+
     if img is not None:
         if img.ndim == 2:
             img = img[:, :, np.newaxis]
@@ -326,7 +344,9 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
         for index, channel in enumerate(channels):
             if channel == spot_detection['channel']:
                 ind = index
-        df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind)
+        if "image_preprocessing" not in spot_detection:
+            spot_detection.update({'image_preprocessing': None})
+        df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind, image_preprocessing=spot_detection['image_preprocessing'])

     if normalisation_list:
         for norm in normalisation_list:
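The new `image_preprocessing` key is injected with a `None` default so instruction files written by older versions keep working. A hedged sketch of what a spot-detection protocol might look like after this change; the key names come from the hunk above, while the channel name and the filter-list format passed to `filter_image` are assumptions:

# Hypothetical spot-detection protocol consumed by measure_features()
spot_detection = {
    'channel': 'CFSE_channel',    # assumed channel name; must be in `channels`
    'diameter': 6.0,              # expected spot diameter in pixels
    'threshold': 0.01,            # blob_log/blob_dog detection threshold
    'image_preprocessing': None,  # legacy protocols omit this key entirely
}
if "image_preprocessing" not in spot_detection:  # backward-compatibility shim
    spot_detection.update({'image_preprocessing': None})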
@@ -351,10 +371,16 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
             if f in extra_props:
                 feats.remove(f)
                 extra_props_list.append(getattr(extra_properties, f))
+
+    # Add intensity nan mean if need to measure mean intensities
+    if measure_mean_intensities:
+        extra_props_list.append(getattr(extra_properties, 'intensity_nanmean'))
+
     if len(extra_props_list) == 0:
         extra_props_list = None
     else:
         extra_props_list = tuple(extra_props_list)
+
     props = regionprops_table(label, intensity_image=img, properties=feats, extra_properties=extra_props_list)
     df_props = pd.DataFrame(props)
     if spot_detection is not None:
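Mechanically, swapping the built-in `intensity_mean` for an extra property changes how scikit-image computes the per-cell mean: `regionprops_table` accepts custom callables through `extra_properties` and names the output column after the function. A sketch of that mechanism with a NaN-aware mean; the actual `celldetective.extra_properties.intensity_nanmean` is not shown in this diff, so the body below is an assumption about its behaviour:

import numpy as np
from skimage.measure import regionprops_table

def intensity_nanmean(regionmask, intensity_image):
    # Assumed behaviour: mean intensity over one region, ignoring NaN pixels
    return np.nanmean(intensity_image[regionmask])

label = np.array([[1, 1, 0], [0, 2, 2]])
img = np.array([[1.0, np.nan, 0.0], [0.0, 3.0, 5.0]])
props = regionprops_table(label, intensity_image=img, properties=['label'],
                          extra_properties=(intensity_nanmean,))
print(props)  # {'label': array([1, 2]), 'intensity_nanmean': array([1., 4.])}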
@@ -898,12 +924,15 @@ def normalise_by_cell(image, labels, distance=5, model='median', operation='subt
     return normalised_frame


-def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
+def extract_blobs_in_image(image, label, diameter, threshold=0., method="log", image_preprocessing=None):

     if np.percentile(image.flatten(),99.9)==0.0:
         return None

-    dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
+    if isinstance(image_preprocessing, (list, np.ndarray)):
+        image = filter_image(image.copy(),filters=image_preprocessing) # apply prefiltering to images before spot detection
+
+    dilated_image = ndimage.grey_dilation(label, footprint=disk(int(1.2*diameter))) # dilation larger than spot diameter to be safe

     masked_image = image.copy()
     masked_image[np.where((dilated_image == 0)|(image!=image))] = 0
@@ -912,7 +941,8 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
     if method=="dog":
         blobs = blob_dog(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
     elif method=="log":
-        blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+        blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+
     # Exclude spots outside of cell masks
     mask = np.array([label[int(y), int(x)] != 0 for y, x, _ in blobs])
     if np.any(mask):
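`min_sigma` and `max_sigma` are defined outside the hunks shown here; the deleted `blob_detectionv0` further down derives them from the requested diameter, and presumably the live code still does the same. A sketch of that conversion, under that assumption:

import math

def sigma_bounds(diameter):
    # From the (removed) blob_detectionv0 below; assumed still representative:
    # a LoG/DoG blob of radius r responds near sigma ~ r / sqrt(2)
    min_sigma = (1 / (1 + math.sqrt(2))) * diameter
    max_sigma = math.sqrt(2) * min_sigma
    return min_sigma, max_sigma

print(sigma_bounds(6.0))  # (2.485..., 3.514...)

Note also that the mask dilation above now scales with the spot size, `disk(int(1.2*diameter))`, instead of the previous fixed 10-pixel footprint.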
@@ -923,14 +953,15 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
     return blobs_filtered


-def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log"):
+def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log", image_preprocessing=None):

+
     image = image[:, :, target_channel].copy()
     if np.percentile(image.flatten(),99.9)==0.0:
         return None

     detections = []
-    blobs_filtered = extract_blobs_in_image(image, label, diameter, threshold=threshold)
+    blobs_filtered = extract_blobs_in_image(image, label, diameter, method=method, threshold=threshold, image_preprocessing=image_preprocessing)

     for lbl in np.unique(label):
         if lbl>0:
@@ -955,69 +986,6 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ

     return detections

-
-# def blob_detectionv0(image, label, threshold, diameter):
-#     """
-#     Perform blob detection on an image based on labeled regions.
-#
-#     Parameters:
-#     - image (numpy.ndarray): The input image data.
-#     - label (numpy.ndarray): An array specifying labeled regions in the image.
-#     - threshold (float): The threshold value for blob detection.
-#     - diameter (float): The expected diameter of blobs.
-#
-#     Returns:
-#     - dict: A dictionary containing information about detected blobs.
-#
-#     This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
-#     and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
-#     based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
-#     detected blobs and their mean intensity for each labeled region.
-#
-#     Example:
-#     >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-#     >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
-#     >>> threshold = 0.1
-#     >>> diameter = 5.0
-#     >>> result = blob_detection(image, label, threshold, diameter)
-#     >>> print(result)
-#     {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
-#
-#     Note:
-#     - Blobs are detected using the Difference of Gaussians (DoG) method.
-#     - Detected blobs are filtered based on the specified threshold and expected diameter.
-#     - The returned dictionary contains information about the number of detected blobs and their mean intensity
-#     for each labeled region.
-#     """
-#     blob_labels = {}
-#     dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
-#     for mask_index in np.unique(label):
-#         if mask_index == 0:
-#             continue
-#         removed_background = image.copy()
-#         one_mask = label.copy()
-#         one_mask[np.where(label != mask_index)] = 0
-#         dilated_copy = dilated_image.copy()
-#         dilated_copy[np.where(dilated_image != mask_index)] = 0
-#         removed_background[np.where(dilated_copy == 0)] = 0
-#         min_sigma = (1 / (1 + math.sqrt(2))) * diameter
-#         max_sigma = math.sqrt(2) * min_sigma
-#         blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
-#                          max_sigma=max_sigma)
-
-#         mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
-#         if not np.any(mask):
-#             continue
-#         blobs_filtered = blobs[mask]
-#         binary_blobs = np.zeros_like(label)
-#         for blob in blobs_filtered:
-#             y, x, r = blob
-#             rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
-#             binary_blobs[rr, cc] = 1
-#         spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
-#         blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
-#     return blob_labels
-
 ### Classification ####

 def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):
@@ -1105,7 +1073,7 @@ def estimate_time(df, class_attr, model='step_function', class_of_interest=[2],
     return df


-def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False,r2_threshold=0.5, percentile_recovery=50):
+def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False, transient_event=False, r2_threshold=0.5, percentile_recovery=50, pre_event=None):

     """
     Interpret and classify tracked cells based on their status signals.
@@ -1164,15 +1132,87 @@ def interpret_track_classification(df, class_attr, irreversible_event=False, uni

     if irreversible_event:

-        df = classify_irreversible_events(df, class_attr, r2_threshold=r2_threshold, percentile_recovery=percentile_recovery)
+        df = classify_irreversible_events(df, class_attr, r2_threshold=r2_threshold, percentile_recovery=percentile_recovery, pre_event=pre_event)

     elif unique_state:

-        df = classify_unique_states(df, class_attr, percentile=50)
+        df = classify_unique_states(df, class_attr, percentile=50, pre_event=pre_event)
+
+    elif transient_event:
+
+        df = classify_transient_events(df, class_attr, pre_event=pre_event)
+
+    return df
+
+
+def classify_transient_events(data, class_attr, pre_event=None):
+
+    df = data.copy()
+    cols = list(df.columns)
+
+    # Control input
+    assert 'TRACK_ID' in cols,'Please provide tracked data...'
+    if 'position' in cols:
+        sort_cols = ['position', 'TRACK_ID']
+        df = df.sort_values(by=sort_cols+['FRAME'])
+    else:
+        sort_cols = ['TRACK_ID']
+        df = df.sort_values(by=sort_cols+['FRAME'])
+    if pre_event is not None:
+        assert 't_'+pre_event in cols,"Pre-event time does not seem to be a valid column in the DataFrame..."
+        assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
+
+    stat_col = class_attr.replace('class','status')
+
+    for tid,track in df.groupby(sort_cols):
+
+        indices = track[class_attr].index
+
+        if pre_event is not None:
+
+            if track['class_'+pre_event].values[0]==1:
+                df.loc[indices, class_attr] = np.nan
+                df.loc[indices, stat_col] = np.nan
+                continue
+            else:
+                # pre-event took place (if left-censored took place at time -1)
+                t_pre_event = track['t_'+pre_event].values[0]
+                indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+                df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
+                track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
+
+        status = track[stat_col].to_numpy()
+        timeline = track['FRAME'].to_numpy()
+        timeline_safe = timeline[status==status]
+        status_safe = list(status[status==status])
+
+        peaks, _ = find_peaks(status_safe)
+        widths, _, left, right = peak_widths(status_safe, peaks, rel_height=1)
+        minimum_weight = 0
+
+        if len(peaks)>0:
+            idx = np.argmax(widths)
+            peak = peaks[idx]; width = widths[idx];
+            if width >= minimum_weight:
+                left = left[idx]; right = right[idx];
+                left = timeline_safe[int(left)]; right = timeline_safe[int(right)];
+
+                df.loc[indices, class_attr] = 0
+                df.loc[indices, class_attr.replace('class_','t_')] = left + (right - left)/2.0
+            else:
+                df.loc[indices, class_attr] = 1
+                df.loc[indices, class_attr.replace('class_','t_')] = -1
+        else:
+            df.loc[indices, class_attr] = 1
+            df.loc[indices, class_attr.replace('class_','t_')] = -1
+
+
+    print("Classes: ",df.loc[df['FRAME']==0,class_attr].value_counts())

     return df

-def classify_irreversible_events(df, class_attr, r2_threshold=0.5, percentile_recovery=50):
+
+def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_recovery=50, pre_event=None):

     """
     Classify irreversible events in a tracked dataset based on the status of cells and transitions.
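How the new `classify_transient_events` reads a status signal: the widest plateau of positive status found by `find_peaks`/`peak_widths` is taken as the event, and the event time is the plateau midpoint (class 0 = event observed, class 1 = no event; `minimum_weight` is set to 0, so the width check currently always passes). A toy run of the same scipy calls:

import numpy as np
from scipy.signal import find_peaks, peak_widths

# Toy binary status: a transient event roughly between frames 4 and 8
status = [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
peaks, _ = find_peaks(status)                       # plateau peak at index 6
widths, _, left, right = peak_widths(status, peaks, rel_height=1)
idx = np.argmax(widths)                             # keep the widest plateau
t_event = left[idx] + (right[idx] - left[idx]) / 2.0
print(peaks, widths, t_event)                       # [6] [6.] 6.0

In the function above, `left` and `right` are additionally mapped through `timeline_safe` so that NaN-masked frames do not shift the estimate.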
@@ -1210,45 +1250,62 @@ def classify_irreversible_events(df, class_attr, r2_threshold=0.5, percentile_re
     >>> df = classify_irreversible_events(df, 'class', r2_threshold=0.7)
     """

+    df = data.copy()
     cols = list(df.columns)
+
+    # Control input
     assert 'TRACK_ID' in cols,'Please provide tracked data...'
     if 'position' in cols:
         sort_cols = ['position', 'TRACK_ID']
     else:
         sort_cols = ['TRACK_ID']
+    if pre_event is not None:
+        assert 't_'+pre_event in cols,"Pre-event time does not seem to be a valid column in the DataFrame..."
+        assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."

     stat_col = class_attr.replace('class','status')

     for tid,track in df.groupby(sort_cols):
-
-        # Set status to 0.0 before first detection
-        t_firstdetection = track['t_firstdetection'].values[0]
-        indices_pre_detection = track.loc[track['FRAME']<=t_firstdetection,class_attr].index
-        track.loc[indices_pre_detection,stat_col] = 0.0
-        df.loc[indices_pre_detection,stat_col] = 0.0
-
-        track_valid = track.dropna(subset=stat_col)
-        indices_valid = track_valid[class_attr].index

         indices = track[class_attr].index
+
+        if pre_event is not None:
+            if track['class_'+pre_event].values[0]==1:
+                df.loc[indices, class_attr] = np.nan
+                df.loc[indices, stat_col] = np.nan
+                continue
+            else:
+                # pre-event took place (if left-censored took place at time -1)
+                t_pre_event = track['t_'+pre_event].values[0]
+                indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+                df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
+                track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
+        else:
+            # set state to 0 before first detection
+            t_firstdetection = track['t_firstdetection'].values[0]
+            indices_pre_detection = track.loc[track['FRAME']<=t_firstdetection,class_attr].index
+            track.loc[indices_pre_detection,stat_col] = 0.0
+            df.loc[indices_pre_detection,stat_col] = 0.0
+
+        # The non-NaN part of track (post pre-event)
+        track_valid = track.dropna(subset=stat_col, inplace=False)
         status_values = track_valid[stat_col].to_numpy()

         if np.all([s==0 for s in status_values]):
-            # all negative, no event
+            # all negative to condition, event not observed
             df.loc[indices, class_attr] = 1
-
         elif np.all([s==1 for s in status_values]):
-            # all positive, event already observed
+            # all positive, event already observed (left-censored)
             df.loc[indices, class_attr] = 2
-            #df.loc[indices, class_attr.replace('class','status')] = 2
         else:
-            # ambiguity, possible transition
+            # ambiguity, possible transition, use `unique_state` technique after
             df.loc[indices, class_attr] = 2
-
+
     print("Classes after initial pass: ",df.loc[df['FRAME']==0,class_attr].value_counts())

     df.loc[df[class_attr]!=2, class_attr.replace('class', 't')] = -1
-    df = estimate_time(df, class_attr, model='step_function', class_of_interest=[2],r2_threshold=r2_threshold)
+    # Try to fit time on class 2 cells (ambiguous)
+    df = estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=r2_threshold)
     print("Classes after fit: ", df.loc[df['FRAME']==0,class_attr].value_counts())

     # Revisit class 2 cells to classify as neg/pos with percentile tolerance
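All three classifiers now share the same `pre_event` gating seen above: tracks whose pre-event class is 1 (pre-event never observed) are voided with NaN, and frames up to `t_<pre_event>` are masked so only the post-event portion is classified. A toy sketch of the masking step, with a hypothetical 'arrival' event and column names following the `class_*`/`t_*`/`status_*` convention used here:

import numpy as np
import pandas as pd

track = pd.DataFrame({
    'TRACK_ID': [1] * 6,
    'FRAME': range(6),
    'class_arrival': [0] * 6,               # 0: the pre-event was observed...
    't_arrival': [3.0] * 6,                 # ...at frame 3
    'status_death': [0.0, 0.0, 1.0, 0.0, 1.0, 1.0],
})
mask = track['FRAME'] <= track['t_arrival'].iloc[0]
track.loc[mask, 'status_death'] = np.nan    # ignore everything before the pre-event
print(track['status_death'].tolist())       # [nan, nan, nan, nan, 1.0, 1.0]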
@@ -1257,7 +1314,8 @@ def classify_irreversible_events(df, class_attr, r2_threshold=0.5, percentile_re

     return df

-def classify_unique_states(df, class_attr, percentile=50):
+
+def classify_unique_states(df, class_attr, percentile=50, pre_event=None):

     """
     Classify unique cell states based on percentile values of a status attribute in a tracked dataset.
@@ -1300,19 +1358,31 @@ def classify_unique_states(df, class_attr, percentile=50):
     else:
         sort_cols = ['TRACK_ID']

-    stat_col = class_attr.replace('class','status')
-
+    if pre_event is not None:
+        assert 't_'+pre_event in cols,"Pre-event time does not seem to be a valid column in the DataFrame..."
+        assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."

-    for tid,track in df.groupby(sort_cols):
-
-
-        track_valid = track.dropna(subset=stat_col)
-        indices_valid = track_valid[class_attr].index
+    stat_col = class_attr.replace('class','status')
+
+    for tid, track in df.groupby(sort_cols):

         indices = track[class_attr].index
-        status_values = track_valid[stat_col].to_numpy()

+        if pre_event is not None:
+            if track['class_'+pre_event].values[0]==1:
+                df.loc[indices, class_attr] = np.nan
+                df.loc[indices, stat_col] = np.nan
+                df.loc[indices, stat_col.replace('status_','t_')] = -1
+                continue
+            else:
+                t_pre_event = track['t_'+pre_event].values[0]
+                indices_pre = track.loc[track['FRAME']<=t_pre_event, class_attr].index
+                df.loc[indices_pre, stat_col] = np.nan
+                track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan

+        # Post pre-event track
+        track_valid = track.dropna(subset=stat_col, inplace=False)
+        status_values = track_valid[stat_col].to_numpy()
         frames = track_valid['FRAME'].to_numpy()
         t_first = track['t_firstdetection'].to_numpy()[0]
         perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
@@ -1383,8 +1453,11 @@ def classify_cells_from_query(df, status_attr, query):

     df = df.copy()
     df.loc[:,status_attr] = 0
+    df[status_attr] = df[status_attr].astype(float)

     cols = extract_cols_from_query(query)
+    print(f"{cols=}")
+
     cols_in_df = np.all([c in list(df.columns) for c in cols], axis=0)
     if query=='':
         print('The provided query is empty...')
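The new `astype(float)` line makes the freshly initialised status column NaN-friendly before any rows are voided: `df.loc[:, status_attr] = 0` yields an integer column, and assigning NaN into an integer column forces a dtype upcast that newer pandas versions warn about. A minimal reproduction of the pattern:

import numpy as np
import pandas as pd

df = pd.DataFrame(index=range(3))
df.loc[:, 'status_q'] = 0                        # int64 column
df['status_q'] = df['status_q'].astype(float)    # make it NaN-capable up front
df.loc[[1], 'status_q'] = np.nan                 # safe: no dtype change needed
print(df['status_q'].dtype, df['status_q'].tolist())  # float64 [0.0, nan, 0.0]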
celldetective/preprocessing.py CHANGED
@@ -894,7 +894,7 @@ def fit_and_apply_model_background_to_stack(stack_path,
     else:
         newfile = '_'.join([prefix,file])

-    with tiff.TiffWriter(os.sep.join([path,newfile]),imagej=True) as tif:
+    with tiff.TiffWriter(os.sep.join([path,newfile]), imagej=True, bigtiff=True) as tif:

         for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):

@@ -1156,7 +1156,7 @@ def correct_channel_offset_single_stack(stack_path,
     else:
         newfile = '_'.join([prefix,file])

-    with tiff.TiffWriter(os.sep.join([path,newfile]),imagej=True) as tif:
+    with tiff.TiffWriter(os.sep.join([path,newfile]),bigtiff=True,imagej=True) as tif:

         for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):

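`bigtiff=True` switches tifffile to the 64-bit BigTIFF layout, lifting the 4 GiB size limit of classic TIFF that long multichannel stacks can exceed. A minimal sketch of the same frame-by-frame writer pattern; note that, depending on the pinned tifffile version, combining `imagej=True` with `bigtiff=True` as above may be rejected, since the ImageJ hyperstack format predates BigTIFF:

import numpy as np
import tifffile as tiff

# Toy 8-frame stack written incrementally, as in the loops above
frames = (np.random.rand(8, 256, 256) * 65535).astype(np.uint16)
with tiff.TiffWriter('toy_corrected.tif', bigtiff=True) as tif:
    for frame in frames:
        tif.write(frame)  # one page per frame; file may grow past 4 GiB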
celldetective/relative_measurements.py CHANGED
@@ -128,10 +128,6 @@ def measure_pairs(pos, neighborhood_protocol):



-
-
-
-
 def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs={'window': 3, 'mode': 'bi'}):
     """
     pos: position to process
@@ -340,9 +336,10 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
             cum_sum = 0
             for t in range(len(full_timeline)):

-                if t in timeline_reference: # meaning position exists on both sides
+                if t in timeline_reference and t in timeline_neighbor: # meaning position exists on both sides

                     idx_reference = list(timeline_reference).index(t)
+                    idx_neighbor = list(timeline_neighbor).index(t)
                     inter = intersection_values.loc[(intersection_values['neigh_id']==nc)&(intersection_values["frame"]==t),"intersection"].values
                     if len(inter)==0:
                         inter = np.nan
@@ -350,12 +347,12 @@ def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs
                         inter = inter[0]

                     neigh_inter_fraction = np.nan
-                    if inter==inter and neigh_area[t]==neigh_area[t]:
-                        neigh_inter_fraction = inter / neigh_area[t]
+                    if inter==inter and neigh_area[idx_neighbor]==neigh_area[idx_neighbor]:
+                        neigh_inter_fraction = inter / neigh_area[idx_neighbor]

                     ref_inter_fraction = np.nan
-                    if inter==inter and ref_area[t]==ref_area[t]:
-                        ref_inter_fraction = inter / ref_area[t]
+                    if inter==inter and ref_area[idx_reference]==ref_area[idx_reference]:
+                        ref_inter_fraction = inter / ref_area[idx_reference]

                     if nc in neighbor_ids_per_t[idx_reference]:

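The indexing fix above matters whenever a track has gaps: `neigh_area` and `ref_area` hold one value per frame where the cell actually exists, so indexing them with the absolute frame number `t` reads the wrong entry, or runs off the end, as soon as a frame is missing. A toy illustration with hypothetical values:

import numpy as np

timeline_neighbor = np.array([0, 1, 4, 5])        # neighbour absent at frames 2-3
neigh_area = np.array([50.0, 52.0, 47.0, 49.0])   # one entry per existing frame

t = 4                                             # current movie frame
idx_neighbor = list(timeline_neighbor).index(t)   # positional index, as in the fix
print(neigh_area[idx_neighbor])                   # 47.0, measured at frame 4
# the old neigh_area[t] would raise IndexError here (array length is 4)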
celldetective/scripts/measure_cells.py CHANGED
@@ -259,13 +259,13 @@ def measure_index(indices):
                 'y': column_labels['y']}
             feature_table.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'}, inplace=True)

-        if do_iso_intensities:
+        if do_iso_intensities and not trajectories is None:
             iso_table = measure_isotropic_intensity(positions_at_t, img, channels=channel_names, intensity_measurement_radii=intensity_measurement_radii, column_labels=column_labels, operations=isotropic_operations, verbose=False)

-        if do_iso_intensities and do_features:
+        if do_iso_intensities and do_features and not trajectories is None:
             measurements_at_t = iso_table.merge(feature_table, how='outer', on='class_id',suffixes=('_delme', ''))
             measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]
-        elif do_iso_intensities * (not do_features):
+        elif do_iso_intensities * (not do_features) * (not trajectories is None):
             measurements_at_t = iso_table
         elif do_features:
             measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id',suffixes=('_delme', ''))
@@ -279,6 +279,12 @@ def measure_index(indices):
                 measurements_at_t.loc[:,c.replace('_y','_POSITION_Y')] = measurements_at_t[c] + measurements_at_t['POSITION_Y']
             measurements_at_t = measurements_at_t.drop(columns = center_of_mass_x_cols+center_of_mass_y_cols)

+        try:
+            measurements_at_t['radial_distance'] = np.sqrt((measurements_at_t[column_labels['x']] - img.shape[0] / 2) ** 2 + (
+                measurements_at_t[column_labels['y']] - img.shape[1] / 2) ** 2)
+        except Exception as e:
+            print(f"{e=}")
+
         if measurements_at_t is not None:
             measurements_at_t[column_labels['time']] = t
             timestep_dataframes.append(measurements_at_t)
@@ -308,6 +314,10 @@ if len(timestep_dataframes)>0:
     df['ID'] = np.arange(len(df))

     df = df.reset_index(drop=True)
+
+    invalid_cols = [c for c in list(df.columns) if c.startswith('Unnamed')]
+    if len(invalid_cols)>0:
+        df = df.drop(invalid_cols, axis=1)

     df.to_csv(pos+os.sep.join(["output", "tables", table_name]), index=False)
     print(f'Measurements successfully written in table {pos+os.sep.join(["output", "tables", table_name])}')
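The 'Unnamed' guard strips the stray index column that appears when a table previously saved with its index is re-read with `pd.read_csv` on a later pass through the pipeline; a quick reproduction:

import pandas as pd
from io import StringIO

csv = "Unnamed: 0,TRACK_ID,area\n0,1,120\n1,2,98\n"  # index column saved by mistake
df = pd.read_csv(StringIO(csv))
invalid_cols = [c for c in list(df.columns) if c.startswith('Unnamed')]
if len(invalid_cols) > 0:
    df = df.drop(invalid_cols, axis=1)
print(df.columns.tolist())  # ['TRACK_ID', 'area']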
celldetective/scripts/segment_cells.py CHANGED
@@ -47,7 +47,6 @@ else:

 if not use_gpu:
     os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-    n_threads = int(process_arguments['threads'])

 modelname = str(process_arguments['model'])

celldetective/scripts/track_cells.py CHANGED
@@ -97,6 +97,18 @@ if os.path.exists(instr_path):
         post_processing_options = instructions['post_processing_options']
     else:
         post_processing_options = None
+
+    btrack_option = True
+    if 'btrack_option' in instructions:
+        btrack_option = instructions['btrack_option']
+    search_range = None
+    if 'search_range' in instructions:
+        search_range = instructions['search_range']
+    memory = None
+    if 'memory' in instructions:
+        memory = instructions['memory']
+
+
 else:
     print('Tracking instructions could not be located... Using a standard bTrack motion model instead...')
     btrack_config = interpret_tracking_configuration(None)
@@ -104,7 +116,9 @@ else:
     mask_channels = None
     haralick_options = None
     post_processing_options = None
-
+    btrack_option = True
+    memory = None
+    search_range = None
 if features is None:
     features = []

@@ -147,6 +161,13 @@ with open(pos+f'log_{mode}.json', 'a') as f:
     f.write(f'{datetime.datetime.now()} TRACK \n')
     f.write(log+"\n")

+
+if not btrack_option:
+    features = []
+    channel_names = None
+    haralick_options = None
+
+
 def measure_index(indices):
     for t in tqdm(indices,desc="frame"):

@@ -203,6 +224,9 @@ trajectories, napari_data = track(None,
                             track_kwargs={'step_size': 100},
                             clean_trajectories_kwargs=post_processing_options,
                             volume=(shape_x, shape_y),
+                            btrack_option=btrack_option,
+                            search_range=search_range,
+                            memory=memory,
                             )

 # out trajectory table, create POSITION_X_um, POSITION_Y_um, TIME_min (new ones)
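`search_range` and `memory` are the classic parameters of nearest-neighbour linkers such as trackpy (maximum displacement per frame, and how many frames an object may disappear and still be relinked), which suggests the new non-bTrack path delegates to such a linker. How `track()` wires them is not visible in this diff, so the sketch below only shows the standard trackpy call these parameters map onto; the detections table is hypothetical:

import pandas as pd
import trackpy as tp

# Hypothetical detections: one row per object per frame, 'x'/'y'/'frame' columns
detections = pd.DataFrame({
    'frame': [0, 0, 1, 1],
    'x': [10.0, 40.0, 11.0, 41.5],
    'y': [10.0, 40.0, 10.5, 39.0],
})
linked = tp.link(detections, search_range=5.0, memory=2)
print(linked[['frame', 'particle']])  # adds a 'particle' (track) id per detection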
celldetective/signals.py CHANGED
@@ -167,12 +167,13 @@ def analyze_signals(trajectories, model, interpolate_na=True,
     if selected_signals is None:
         selected_signals = []
         for s in required_signals:
-            pattern_test = [s in a or s==a for a in available_signals]
-            #print(f'Pattern test for signal {s}: ', pattern_test)
-            assert np.any(pattern_test),f'No signal matches with the requirements of the model {required_signals}. Please pass the signals manually with the argument selected_signals or add measurements. Abort.'
-            valid_columns = natsorted(np.array(available_signals)[np.array(pattern_test)])
-            print(f"Selecting the first time series among: {valid_columns} for input requirement {s}...")
-            selected_signals.append(valid_columns[0])
+            priority_cols = [a for a in available_signals if a==s]
+            second_priority_cols = [a for a in available_signals if a.startswith(s) and a!=s]
+            third_priority_cols = [a for a in available_signals if s in a and not a.startswith(s)]
+            candidates = priority_cols + second_priority_cols + third_priority_cols
+            assert len(candidates)>0,f'No signal matches with the requirements of the model {required_signals}. Please pass the signals manually with the argument selected_signals or add measurements. Abort.'
+            print(f"Selecting the first time series among: {candidates} for input requirement {s}...")
+            selected_signals.append(candidates[0])
     else:
         assert len(selected_signals)==len(required_signals),f'Mismatch between the number of required signals {required_signals} and the provided signals {selected_signals}... Abort.'

@@ -878,6 +879,7 @@ class SignalDetectionModel(object):

         assert self.model_class.layers[0].input_shape[0] == self.model_reg.layers[0].input_shape[0], f"mismatch between input shape of classification: {self.model_class.layers[0].input_shape[0]} and regression {self.model_reg.layers[0].input_shape[0]} models... Error."

+
         return True

     def create_models_from_scratch(self):
@@ -3143,4 +3145,4 @@ if __name__ == "__main__":
     model = ResNetModelCurrent(1, 2, depth=2, use_pooling=True, n_classes = 3, dropout_rate=0.1, dense_collection=512,
                         header="classifier", model_signal_length = 128)
     print(model.summary())
-    #plot_model(model, to_file='test.png', show_shapes=True)
+    #plot_model(model, to_file='test.png', show_shapes=True)
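The rewritten selection makes the matching priority explicit: exact name, then prefix, then substring, where the old natsorted "contains" test could rank a substring hit above a prefix hit. A standalone sketch of the same logic on hypothetical column names:

def select_signal(s, available_signals):
    # Same three-tier preference as the hunk above: exact > prefix > substring
    priority_cols = [a for a in available_signals if a == s]
    second_priority_cols = [a for a in available_signals if a.startswith(s) and a != s]
    third_priority_cols = [a for a in available_signals if s in a and not a.startswith(s)]
    candidates = priority_cols + second_priority_cols + third_priority_cols
    assert len(candidates) > 0, f"no measurement matches the required signal {s}"
    return candidates[0]

cols = ['ER_intensity_mean', 'intensity_mean_nuclei']
print(select_signal('intensity_mean', cols))  # 'intensity_mean_nuclei', prefix wins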