celldetective-1.3.5-py3-none-any.whl → celldetective-1.3.6.post1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
celldetective/measure.py CHANGED
@@ -24,6 +24,9 @@ import celldetective.extra_properties as extra_properties
 from celldetective.extra_properties import *
 from inspect import getmembers, isfunction
 from skimage.morphology import disk
+from scipy.signal import find_peaks, peak_widths
+
+from celldetective.segmentation import filter_image
 
 abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
 
@@ -341,7 +344,9 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
 		for index, channel in enumerate(channels):
 			if channel == spot_detection['channel']:
 				ind = index
-		df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind)
+		if "image_preprocessing" not in spot_detection:
+			spot_detection.update({'image_preprocessing': None})
+		df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind, image_preprocessing=spot_detection['image_preprocessing'])
 
 	if normalisation_list:
 		for norm in normalisation_list:
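This guard keeps measurement instruction files written before 1.3.6 working: a saved `spot_detection` dictionary without an `image_preprocessing` key is defaulted to `None` before the call. A minimal sketch of the same idea (the dictionary values here are hypothetical):

```python
# Hypothetical spot_detection block, as loaded from an older instruction file.
spot_detection = {'channel': 'CFSE', 'diameter': 5, 'threshold': 0.01}

# Equivalent backward-compatible defaulting using dict.setdefault.
spot_detection.setdefault('image_preprocessing', None)

assert spot_detection['image_preprocessing'] is None  # older configs fall back to no prefiltering
```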
@@ -919,12 +924,15 @@ def normalise_by_cell(image, labels, distance=5, model='median', operation='subt
 	return normalised_frame
 
 
-def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
+def extract_blobs_in_image(image, label, diameter, threshold=0., method="log", image_preprocessing=None):
 
 	if np.percentile(image.flatten(),99.9)==0.0:
 		return None
 
-	dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
+	if isinstance(image_preprocessing, (list, np.ndarray)):
+		image = filter_image(image.copy(),filters=image_preprocessing) # apply prefiltering to images before spot detection
+
+	dilated_image = ndimage.grey_dilation(label, footprint=disk(int(1.2*diameter))) # dilation larger than spot diameter to be safe
 
 	masked_image = image.copy()
 	masked_image[np.where((dilated_image == 0)|(image!=image))] = 0
@@ -933,7 +941,8 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
 	if method=="dog":
 		blobs = blob_dog(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
 	elif method=="log":
-		blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+		blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+
 	# Exclude spots outside of cell masks
 	mask = np.array([label[int(y), int(x)] != 0 for y, x, _ in blobs])
 	if np.any(mask):
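For reference, `min_sigma`/`max_sigma` are derived from the expected spot diameter; the conversion below is copied from the removed `blob_detectionv0` further down in this file and wrapped into a runnable sketch on a synthetic image:

```python
import math
import numpy as np
from skimage.feature import blob_log

# Diameter-to-sigma conversion, as in the removed blob_detectionv0 below.
diameter = 5.0
min_sigma = (1 / (1 + math.sqrt(2))) * diameter
max_sigma = math.sqrt(2) * min_sigma

# Synthetic frame with a single bright spot.
img = np.zeros((64, 64))
img[30:35, 30:35] = 1.0

blobs = blob_log(img, threshold=0.01, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
print(blobs)  # one row per blob: (y, x, sigma)
```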
@@ -944,14 +953,15 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
 	return blobs_filtered
 
 
-def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log"):
+def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log", image_preprocessing=None):
 
+
 	image = image[:, :, target_channel].copy()
 	if np.percentile(image.flatten(),99.9)==0.0:
 		return None
 
 	detections = []
-	blobs_filtered = extract_blobs_in_image(image, label, diameter, threshold=threshold)
+	blobs_filtered = extract_blobs_in_image(image, label, diameter, method=method, threshold=threshold, image_preprocessing=image_preprocessing)
 
 	for lbl in np.unique(label):
 		if lbl>0:
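Besides threading `image_preprocessing` through, this call now also forwards `method=method`; previously `blob_detection` accepted a `method` argument but never passed it on, so `"dog"` silently fell back to the `"log"` default. A hypothetical call with the new parameters (synthetic inputs; the filter-list format is assumed to mirror the `[name, size]` protocol seen in `utils.py`, e.g. `[['gauss',2],['std',4]]`):

```python
import numpy as np
from celldetective.measure import blob_detection

# Synthetic single-channel frame (H, W, C) and one square cell mask.
stack = np.random.rand(64, 64, 1)
labels = np.zeros((64, 64), dtype=int)
labels[20:40, 20:40] = 1

df_spots = blob_detection(
	stack, labels,
	diameter=5,
	threshold=0.01,
	channel_name='CFSE',                 # assumption: a channel named 'CFSE'
	target_channel=0,
	method='dog',                        # now actually forwarded downstream
	image_preprocessing=[['gauss', 2]],  # assumed filter-list format
)
```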
@@ -976,69 +986,6 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ
 
 	return detections
 
-
-# def blob_detectionv0(image, label, threshold, diameter):
-# 	"""
-# 	Perform blob detection on an image based on labeled regions.
-
-# 	Parameters:
-# 	- image (numpy.ndarray): The input image data.
-# 	- label (numpy.ndarray): An array specifying labeled regions in the image.
-# 	- threshold (float): The threshold value for blob detection.
-# 	- diameter (float): The expected diameter of blobs.
-
-# 	Returns:
-# 	- dict: A dictionary containing information about detected blobs.
-
-# 	This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
-# 	and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
-# 	based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
-# 	detected blobs and their mean intensity for each labeled region.
-
-# 	Example:
-# 	>>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-# 	>>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
-# 	>>> threshold = 0.1
-# 	>>> diameter = 5.0
-# 	>>> result = blob_detection(image, label, threshold, diameter)
-# 	>>> print(result)
-# 	{1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
-
-# 	Note:
-# 	- Blobs are detected using the Difference of Gaussians (DoG) method.
-# 	- Detected blobs are filtered based on the specified threshold and expected diameter.
-# 	- The returned dictionary contains information about the number of detected blobs and their mean intensity
-# 	for each labeled region.
-# 	"""
-# 	blob_labels = {}
-# 	dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
-# 	for mask_index in np.unique(label):
-# 		if mask_index == 0:
-# 			continue
-# 		removed_background = image.copy()
-# 		one_mask = label.copy()
-# 		one_mask[np.where(label != mask_index)] = 0
-# 		dilated_copy = dilated_image.copy()
-# 		dilated_copy[np.where(dilated_image != mask_index)] = 0
-# 		removed_background[np.where(dilated_copy == 0)] = 0
-# 		min_sigma = (1 / (1 + math.sqrt(2))) * diameter
-# 		max_sigma = math.sqrt(2) * min_sigma
-# 		blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
-# 						 max_sigma=max_sigma)
-
-# 		mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
-# 		if not np.any(mask):
-# 			continue
-# 		blobs_filtered = blobs[mask]
-# 		binary_blobs = np.zeros_like(label)
-# 		for blob in blobs_filtered:
-# 			y, x, r = blob
-# 			rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
-# 			binary_blobs[rr, cc] = 1
-# 		spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
-# 		blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
-# 	return blob_labels
-
 
 ### Classification ####
 
 def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):
@@ -1126,7 +1073,7 @@ def estimate_time(df, class_attr, model='step_function', class_of_interest=[2],
 	return df
 
 
-def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False,r2_threshold=0.5, percentile_recovery=50, pre_event=None):
+def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False, transient_event=False, r2_threshold=0.5, percentile_recovery=50, pre_event=None):
 
 	"""
 	Interpret and classify tracked cells based on their status signals.
@@ -1191,8 +1138,80 @@ def interpret_track_classification(df, class_attr, irreversible_event=False, uni
 
 		df = classify_unique_states(df, class_attr, percentile=50, pre_event=pre_event)
 
+	elif transient_event:
+
+		df = classify_transient_events(df, class_attr, pre_event=pre_event)
+
+	return df
+
+
+def classify_transient_events(data, class_attr, pre_event=None):
+
+	df = data.copy()
+	cols = list(df.columns)
+
+	# Control input
+	assert 'TRACK_ID' in cols,'Please provide tracked data...'
+	if 'position' in cols:
+		sort_cols = ['position', 'TRACK_ID']
+		df = df.sort_values(by=sort_cols+['FRAME'])
+	else:
+		sort_cols = ['TRACK_ID']
+		df = df.sort_values(by=sort_cols+['FRAME'])
+	if pre_event is not None:
+		assert 't_'+pre_event in cols,"Pre-event time does not seem to be a valid column in the DataFrame..."
+		assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
+
+	stat_col = class_attr.replace('class','status')
+
+	for tid,track in df.groupby(sort_cols):
+
+		indices = track[class_attr].index
+
+		if pre_event is not None:
+
+			if track['class_'+pre_event].values[0]==1:
+				df.loc[indices, class_attr] = np.nan
+				df.loc[indices, stat_col] = np.nan
+				continue
+			else:
+				# pre-event took place (if left-censored took place at time -1)
+				t_pre_event = track['t_'+pre_event].values[0]
+				indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+				df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
+				track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
+
+		status = track[stat_col].to_numpy()
+		timeline = track['FRAME'].to_numpy()
+		timeline_safe = timeline[status==status]
+		status_safe = list(status[status==status])
+
+		peaks, _ = find_peaks(status_safe)
+		widths, _, left, right = peak_widths(status_safe, peaks, rel_height=1)
+		minimum_weight = 0
+
+		if len(peaks)>0:
+			idx = np.argmax(widths)
+			peak = peaks[idx]; width = widths[idx];
+			if width >= minimum_weight:
+				left = left[idx]; right = right[idx];
+				left = timeline_safe[int(left)]; right = timeline_safe[int(right)];
+
+				df.loc[indices, class_attr] = 0
+				df.loc[indices, class_attr.replace('class_','t_')] = left + (right - left)/2.0
+			else:
+				df.loc[indices, class_attr] = 1
+				df.loc[indices, class_attr.replace('class_','t_')] = -1
+		else:
+			df.loc[indices, class_attr] = 1
+			df.loc[indices, class_attr.replace('class_','t_')] = -1
+
+
+	print("Classes: ",df.loc[df['FRAME']==0,class_attr].value_counts())
+
 	return df
 
+
 def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_recovery=50, pre_event=None):
 
 	"""
@@ -1246,62 +1265,41 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
 
 	stat_col = class_attr.replace('class','status')
 
-	if pre_event is not None:
-
-		# Version with pre event; intuition: mask status value before pre-event takes place with NaN
-		for tid, track in df.groupby(sort_cols):
-
-			indices = track[class_attr].index
+	for tid,track in df.groupby(sort_cols):
 
+		indices = track[class_attr].index
+
+		if pre_event is not None:
 			if track['class_'+pre_event].values[0]==1:
-				# Pre-event never took place, all NaN
 				df.loc[indices, class_attr] = np.nan
 				df.loc[indices, stat_col] = np.nan
+				continue
 			else:
 				# pre-event took place (if left-censored took place at time -1)
 				t_pre_event = track['t_'+pre_event].values[0]
 				indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
 				df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
 				track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
-
-			# The non-NaN part of track (post pre-event)
-			track_valid = track.dropna(subset=stat_col, inplace=False)
-			status_values = track_valid[stat_col].to_numpy()
-
-			if np.all([s==0 for s in status_values]):
-				# all negative to condition, event not observed
-				df.loc[indices, class_attr] = 1
-			elif np.all([s==1 for s in status_values]):
-				# all positive, event already observed (left-censored)
-				df.loc[indices, class_attr] = 2
-			else:
-				# ambiguity, possible transition, use `unique_state` technique after
-				df.loc[indices, class_attr] = 2
-	else:
-		for tid,track in df.groupby(sort_cols):
-
-			# Set status to 0.0 before first detection
+		else:
+			# set state to 0 before first detection
 			t_firstdetection = track['t_firstdetection'].values[0]
 			indices_pre_detection = track.loc[track['FRAME']<=t_firstdetection,class_attr].index
 			track.loc[indices_pre_detection,stat_col] = 0.0
 			df.loc[indices_pre_detection,stat_col] = 0.0
-
-			track_valid = track.dropna(subset=stat_col)
-
-			indices = track[class_attr].index
-			status_values = track_valid[stat_col].to_numpy()
-
-			if np.all([s==0 for s in status_values]):
-				# all negative, no event
-				df.loc[indices, class_attr] = 1
-
-			elif np.all([s==1 for s in status_values]):
-				# all positive, event already observed
-				df.loc[indices, class_attr] = 2
-				#df.loc[indices, class_attr.replace('class','status')] = 2
-			else:
-				# ambiguity, possible transition
-				df.loc[indices, class_attr] = 2
+
+		# The non-NaN part of track (post pre-event)
+		track_valid = track.dropna(subset=stat_col, inplace=False)
+		status_values = track_valid[stat_col].to_numpy()
+
+		if np.all([s==0 for s in status_values]):
+			# all negative to condition, event not observed
+			df.loc[indices, class_attr] = 1
+		elif np.all([s==1 for s in status_values]):
+			# all positive, event already observed (left-censored)
+			df.loc[indices, class_attr] = 2
+		else:
+			# ambiguity, possible transition, use `unique_state` technique after
+			df.loc[indices, class_attr] = 2
 
 	print("Classes after initial pass: ",df.loc[df['FRAME']==0,class_attr].value_counts())
 
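The refactor merges the two `pre_event` branches into a single loop with a `continue`, but the class encoding is unchanged: 1 = event never observed (all-negative track), 2 = event observed or left-censored, with mixed tracks provisionally set to 2 and refined by the `unique_state` pass afterwards. A minimal illustration of that mapping:

```python
import numpy as np

def initial_class(status_values):
	# 1 = all-negative track, 2 = all-positive or mixed (possible transition)
	if np.all(status_values == 0):
		return 1
	return 2

print(initial_class(np.array([0, 0, 0])))  # 1: event not observed
print(initial_class(np.array([1, 1, 1])))  # 2: left-censored
print(initial_class(np.array([0, 1, 1])))  # 2: refined in a later pass
```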
@@ -1316,6 +1314,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
 
 	return df
 
+
 def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
 
 	"""
@@ -1364,62 +1363,38 @@ def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
 		assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
 
 	stat_col = class_attr.replace('class','status')
+
+	for tid, track in df.groupby(sort_cols):
 
-	if pre_event is not None:
-
-		for tid, track in df.groupby(sort_cols):
-
-			indices = track[class_attr].index
+		indices = track[class_attr].index
 
+		if pre_event is not None:
 			if track['class_'+pre_event].values[0]==1:
-				# then pre event not satisfied, class/status is NaN
 				df.loc[indices, class_attr] = np.nan
 				df.loc[indices, stat_col] = np.nan
 				df.loc[indices, stat_col.replace('status_','t_')] = -1
+				continue
 			else:
-				# Pre event might happen, set to NaN observations before pre event
 				t_pre_event = track['t_'+pre_event].values[0]
-				indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+				indices_pre = track.loc[track['FRAME']<=t_pre_event, class_attr].index
 				df.loc[indices_pre, stat_col] = np.nan
 				track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
 
-			# Post pre-event track
-			track_valid = track.dropna(subset=stat_col, inplace=False)
-			status_values = track_valid[stat_col].to_numpy()
-
-			frames = track_valid['FRAME'].to_numpy() # from t_pre-event to T
-			t_first = track['t_firstdetection'].to_numpy()[0]
-			perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
-
-			if perc_status==perc_status:
-				c = ceil(perc_status)
-				if c==0:
-					df.loc[indices, class_attr] = 1
-					df.loc[indices, class_attr.replace('class','t')] = -1
-				elif c==1:
-					df.loc[indices, class_attr] = 2
-					df.loc[indices, class_attr.replace('class','t')] = -1
-	else:
-		for tid,track in df.groupby(sort_cols):
-
-			track_valid = track.dropna(subset=stat_col)
-			indices_valid = track_valid[class_attr].index
-
-			indices = track[class_attr].index
-			status_values = track_valid[stat_col].to_numpy()
-
-			frames = track_valid['FRAME'].to_numpy()
-			t_first = track['t_firstdetection'].to_numpy()[0]
-			perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
-
-			if perc_status==perc_status:
-				c = ceil(perc_status)
-				if c==0:
-					df.loc[indices, class_attr] = 1
-					df.loc[indices, class_attr.replace('class','t')] = -1
-				elif c==1:
-					df.loc[indices, class_attr] = 2
-					df.loc[indices, class_attr.replace('class','t')] = -1
+		# Post pre-event track
+		track_valid = track.dropna(subset=stat_col, inplace=False)
+		status_values = track_valid[stat_col].to_numpy()
+		frames = track_valid['FRAME'].to_numpy()
+		t_first = track['t_firstdetection'].to_numpy()[0]
+		perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
+
+		if perc_status==perc_status:
+			c = ceil(perc_status)
+			if c==0:
+				df.loc[indices, class_attr] = 1
+				df.loc[indices, class_attr.replace('class','t')] = -1
+			elif c==1:
+				df.loc[indices, class_attr] = 2
+				df.loc[indices, class_attr.replace('class','t')] = -1
 	return df
 
 def classify_cells_from_query(df, status_attr, query):
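`classify_unique_states` likewise collapses to a single loop; the class now comes from a percentile of the post-first-detection status values, where `ceil(nanpercentile(status, percentile))` yields 0 for a majority-negative track (class 1) and 1 otherwise (class 2), and the `perc_status==perc_status` test simply filters out NaN. A quick numeric check:

```python
from math import ceil
import numpy as np

status = np.array([0, 0, 1, 0, 0], dtype=float)   # mostly negative track
print(ceil(np.nanpercentile(status, 50)))         # 0 -> class 1 (event not observed)

status = np.array([1, 1, 0, 1, 1], dtype=float)   # mostly positive track
print(ceil(np.nanpercentile(status, 50)))         # 1 -> class 2
```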
@@ -97,6 +97,18 @@ if os.path.exists(instr_path):
 		post_processing_options = instructions['post_processing_options']
 	else:
 		post_processing_options = None
+
+	btrack_option = True
+	if 'btrack_option' in instructions:
+		btrack_option = instructions['btrack_option']
+	search_range = None
+	if 'search_range' in instructions:
+		search_range = instructions['search_range']
+	memory = None
+	if 'memory' in instructions:
+		memory = instructions['memory']
+
+
 else:
 	print('Tracking instructions could not be located... Using a standard bTrack motion model instead...')
 	btrack_config = interpret_tracking_configuration(None)
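All three keys are optional, so tracking instruction files from earlier releases load unchanged. The same defaulting could be written more compactly with `dict.get` (a sketch, not the shipped code):

```python
# `instructions` is the parsed tracking instruction file (empty dict as a stand-in here).
instructions = {}

btrack_option = instructions.get('btrack_option', True)
search_range = instructions.get('search_range', None)
memory = instructions.get('memory', None)
```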
@@ -104,7 +116,9 @@ else:
 	mask_channels = None
 	haralick_options = None
 	post_processing_options = None
-
+	btrack_option = True
+	memory = None
+	search_range = None
 if features is None:
 	features = []
 
@@ -147,6 +161,13 @@ with open(pos+f'log_{mode}.json', 'a') as f:
 	f.write(f'{datetime.datetime.now()} TRACK \n')
 	f.write(log+"\n")
 
+
+if not btrack_option:
+	features = []
+	channel_names = None
+	haralick_options = None
+
+
 def measure_index(indices):
 	for t in tqdm(indices,desc="frame"):
 
@@ -203,6 +224,9 @@ trajectories, napari_data = track(None,
 		track_kwargs={'step_size': 100},
 		clean_trajectories_kwargs=post_processing_options,
 		volume=(shape_x, shape_y),
+		btrack_option=btrack_option,
+		search_range=search_range,
+		memory=memory,
 		)
 
 # out trajectory table, create POSITION_X_um, POSITION_Y_um, TIME_min (new ones)
celldetective/tracking.py CHANGED
@@ -12,13 +12,14 @@ from celldetective.io import interpret_tracking_configuration
 
 import os
 import subprocess
+import trackpy as tp
 
 abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0],'celldetective'])
 
 def track(labels, configuration=None, stack=None, spatial_calibration=1, features=None, channel_names=None,
 		  haralick_options=None, return_napari_data=False, view_on_napari=False, mask_timepoints=None, mask_channels=None, volume=(2048,2048),
 		  optimizer_options = {'tm_lim': int(12e4)}, track_kwargs={'step_size': 100}, objects=None,
-		  clean_trajectories_kwargs=None, column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'},
+		  clean_trajectories_kwargs=None, btrack_option=True, search_range=None, memory=None,column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'},
 		  ):
 
 	"""
@@ -90,6 +91,12 @@ def track(labels, configuration=None, stack=None, spatial_calibration=1, feature
 	configuration = interpret_tracking_configuration(configuration)
 
 	if objects is None:
+
+		if not btrack_option:
+			features = []
+			channel_names = None
+			haralick_options = None
+
 		objects = extract_objects_and_features(labels, stack, features,
 											   channel_names=channel_names,
 											   haralick_options=haralick_options,
@@ -97,63 +104,81 @@ def track(labels, configuration=None, stack=None, spatial_calibration=1, feature
 											   mask_channels=mask_channels,
 											   )
 
-	columns = list(objects.columns)
-	to_remove = ['x','y','class_id','t']
-	for tr in to_remove:
-		try:
-			columns.remove(tr)
-		except:
-			print(f'column {tr} could not be found...')
-
-	scaler = StandardScaler()
-	if columns:
-		x = objects[columns].values
-		x_scaled = scaler.fit_transform(x)
-		df_temp = pd.DataFrame(x_scaled, columns=columns, index = objects.index)
-		objects[columns] = df_temp
-	else:
-		print('Warning: no features were passed to bTrack...')
+	if btrack_option:
+		columns = list(objects.columns)
+		to_remove = ['x','y','class_id','t']
+		for tr in to_remove:
+			try:
+				columns.remove(tr)
+			except:
+				print(f'column {tr} could not be found...')
 
-	# 2) track the objects
-	new_btrack_objects = localizations_to_objects(objects)
+		scaler = StandardScaler()
+		if columns:
+			x = objects[columns].values
+			x_scaled = scaler.fit_transform(x)
+			df_temp = pd.DataFrame(x_scaled, columns=columns, index = objects.index)
+			objects[columns] = df_temp
+		else:
+			print('Warning: no features were passed to bTrack...')
 
-	with BayesianTracker() as tracker:
+		# 2) track the objects
+		new_btrack_objects = localizations_to_objects(objects)
 
-		tracker.configure(configuration)
+		with BayesianTracker() as tracker:
 
-		if columns:
-			tracking_updates = ["motion","visual"]
-			#tracker.tracking_updates = ["motion","visual"]
-			tracker.features = columns
+			tracker.configure(configuration)
+
+			if columns:
+				tracking_updates = ["motion","visual"]
+				#tracker.tracking_updates = ["motion","visual"]
+				tracker.features = columns
+			else:
+				tracking_updates = ["motion"]
+
+			tracker.append(new_btrack_objects)
+			tracker.volume = ((0,volume[0]), (0,volume[1]), (-1e5, 1e5)) #(-1e5, 1e5)
+			#print(tracker.volume)
+			tracker.track(tracking_updates=tracking_updates, **track_kwargs)
+			tracker.optimize(options=optimizer_options)
+
+			data, properties, graph = tracker.to_napari() #ndim=2
+		# do the table post processing and napari options
+		if data.shape[1]==4:
+			df = pd.DataFrame(data, columns=[column_labels['track'],column_labels['time'],column_labels['y'],column_labels['x']])
+		elif data.shape[1]==5:
+			df = pd.DataFrame(data, columns=[column_labels['track'],column_labels['time'],"z",column_labels['y'],column_labels['x']])
+			df = df.drop(columns=['z'])
+		df[column_labels['x']+'_um'] = df[column_labels['x']]*spatial_calibration
+		df[column_labels['y']+'_um'] = df[column_labels['y']]*spatial_calibration
+
+	else:
+		properties = None
+		graph = {}
+		print(f"{objects=} {objects.columns=}")
+		objects = objects.rename(columns={"t": "frame"})
+		if search_range is not None and memory is not None:
+			data = tp.link(objects, search_range, memory=memory,link_strategy='auto')
 		else:
-			tracking_updates = ["motion"]
-
-		tracker.append(new_btrack_objects)
-		tracker.volume = ((0,volume[0]), (0,volume[1]), (-1e5, 1e5)) #(-1e5, 1e5)
-		#print(tracker.volume)
-		tracker.track(tracking_updates=tracking_updates, **track_kwargs)
-		tracker.optimize(options=optimizer_options)
-
-		data, properties, graph = tracker.to_napari() #ndim=2
-
-	# do the table post processing and napari options
-	if data.shape[1]==4:
-		df = pd.DataFrame(data, columns=[column_labels['track'],column_labels['time'],column_labels['y'],column_labels['x']])
-	elif data.shape[1]==5:
-		df = pd.DataFrame(data, columns=[column_labels['track'],column_labels['time'],"z",column_labels['y'],column_labels['x']])
-		df = df.drop(columns=['z'])
-	df[column_labels['x']+'_um'] = df[column_labels['x']]*spatial_calibration
-	df[column_labels['y']+'_um'] = df[column_labels['y']]*spatial_calibration
-
-	df = df.merge(pd.DataFrame(properties),left_index=True, right_index=True)
-	if columns:
-		x = df[columns].values
-		x_scaled = scaler.inverse_transform(x)
-		df_temp = pd.DataFrame(x_scaled, columns=columns, index = df.index)
-		df[columns] = df_temp
-
-	# set dummy features to NaN
-	df.loc[df['dummy'],['class_id']+columns] = np.nan
+			print('Please provide a valid search range and memory value...')
+			return None
+		data['particle'] = data['particle'] + 1 # force track id to start at 1
+		df = data.rename(columns={'frame': column_labels['time'], 'x': column_labels['x'], 'y': column_labels['y'], 'particle': column_labels['track']})
+		df['state'] = 5.0; df['generation'] = 0.0; df['root'] = 1.0; df['parent'] = 1.0; df['dummy'] = False; df['z'] = 0.0;
+		data = df[[column_labels['track'],column_labels['time'],"z",column_labels['y'],column_labels['x']]].to_numpy()
+		print(f"{df=}")
+
+	if btrack_option:
+		df = df.merge(pd.DataFrame(properties),left_index=True, right_index=True)
+		if columns:
+			x = df[columns].values
+			x_scaled = scaler.inverse_transform(x)
+			df_temp = pd.DataFrame(x_scaled, columns=columns, index = df.index)
+			df[columns] = df_temp
+
+		# set dummy features to NaN
+		df.loc[df['dummy'],['class_id']+columns] = np.nan
+
 	df = df.sort_values(by=[column_labels['track'],column_labels['time']])
 	df = velocity_per_track(df, window_size=3, mode='bi')
 
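When `btrack_option` is disabled, linking is delegated to trackpy's nearest-neighbour linker instead of bTrack's Bayesian tracker: `search_range` bounds the per-frame displacement (pixels) and `memory` the number of frames a cell may vanish and still be relinked. A self-contained sketch of the `tp.link` call used above, on toy coordinates:

```python
import pandas as pd
import trackpy as tp

# Two cells drifting slowly over three frames; trackpy expects x, y and frame columns.
objects = pd.DataFrame({
	'x': [10.0, 50.0, 11.0, 51.5, 12.5, 52.0],
	'y': [10.0, 50.0, 10.5, 50.5, 11.0, 51.0],
	'frame': [0, 0, 1, 1, 2, 2],
})

linked = tp.link(objects, 5, memory=2, link_strategy='auto')
linked['particle'] = linked['particle'] + 1  # as above: track ids start at 1
print(linked)
```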
celldetective/utils.py CHANGED
@@ -424,7 +424,7 @@ def estimate_unreliable_edge(activation_protocol=[['gauss',2],['std',4]]):
 	else:
 		edge=0
 		for fct in activation_protocol:
-			if isinstance(fct[1],(int,np.int_)):
+			if isinstance(fct[1],(int,np.int_)) and not fct[0]=='invert':
 				edge+=fct[1]
 	return edge
 
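`estimate_unreliable_edge` sums the integer arguments of an activation protocol (`[name, value]` pairs such as the default `[['gauss',2],['std',4]]`) to estimate how wide an image border is corrupted by filtering. The fix excludes `invert` entries, whose integer argument is presumably an intensity value rather than a kernel size and should not widen the border. An illustration (the `invert` value below is hypothetical):

```python
protocol = [['gauss', 2], ['std', 4], ['invert', 65535]]  # hypothetical protocol

edge = 0
for name, value in protocol:
	if isinstance(value, int) and name != 'invert':
		edge += value  # only spatial kernel sizes count

print(edge)  # 6 with the fix, instead of 65541 before it
```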
@@ -1250,7 +1250,10 @@ def ConfigSectionMap(path,section):
 	Config = configparser.ConfigParser()
 	Config.read(path)
 	dict1 = {}
-	options = Config.options(section)
+	try:
+		options = Config.options(section)
+	except:
+		return None
 	for option in options:
 		try:
 			dict1[option] = Config.get(section, option)
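`Config.options` raises `configparser.NoSectionError` when the requested section is missing; the bare `except` above turns that into a `None` return instead of a crash. The same guard with the specific exception (`settings.ini` and the section name are hypothetical):

```python
import configparser

config = configparser.ConfigParser()
config.read('settings.ini')  # hypothetical file; a missing file simply yields no sections

try:
	options = config.options('Tracking')  # hypothetical section name
except configparser.NoSectionError:
	options = None  # callers can treat an absent section as "no settings"

print(options)
```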
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: celldetective
-Version: 1.3.5
+Version: 1.3.6.post1
 Summary: description
 Home-page: http://github.com/remyeltorro/celldetective
 Author: Rémy Torro
@@ -42,6 +42,7 @@ Requires-Dist: pytest-qt
 Requires-Dist: h5py
 Requires-Dist: cliffs_delta
 Requires-Dist: requests
+Requires-Dist: trackpy
 
 # Celldetective