celldetective 1.3.5__py3-none-any.whl → 1.3.6.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
celldetective/io.py CHANGED
@@ -25,7 +25,7 @@ from magicgui import magicgui
 from pathlib import Path, PurePath
 from shutil import copyfile, rmtree
 
-from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file, interpolate_nan
+from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
 from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
 
 from stardist import fill_label_holes
@@ -211,7 +211,15 @@ def collect_experiment_metadata(pos_path=None, well_path=None):
     antibodies = get_experiment_antibodies(experiment)
     pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
 
-    return {"pos_path": pos_path, "position": pos_path, "pos_name": pos_name, "well_path": well_path, "well_name": well_name, "well_nbr": well_nbr, "experiment": experiment, "antibody": antibodies[idx], "concentration": concentrations[idx], "cell_type": cell_types[idx], "pharmaceutical_agent": pharmaceutical_agents[idx]}
+    dico = {"pos_path": pos_path, "position": pos_path, "pos_name": pos_name, "well_path": well_path, "well_name": well_name, "well_nbr": well_nbr, "experiment": experiment, "antibody": antibodies[idx], "concentration": concentrations[idx], "cell_type": cell_types[idx], "pharmaceutical_agent": pharmaceutical_agents[idx]}
+
+    meta = get_experiment_metadata(experiment)  # None or dict of metadata
+    if meta is not None:
+        keys = list(meta.keys())
+        for k in keys:
+            dico.update({k: meta[k]})
+
+    return dico
 
 
 def get_experiment_wells(experiment):
@@ -379,6 +387,12 @@ def get_temporal_calibration(experiment):
 
     return FrameToMin
 
+def get_experiment_metadata(experiment):
+
+    config = get_config(experiment)
+    metadata = ConfigSectionMap(config, "Metadata")
+    return metadata
+
 
 def get_experiment_concentrations(experiment, dtype=str):
 
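For context, a minimal sketch of what the new `get_experiment_metadata` helper is expected to return, assuming an experiment `config.ini` that carries an optional `[Metadata]` section (the section name comes from the diff; the keys below are hypothetical). celldetective's own `ConfigSectionMap` is mimicked here with the standard-library `configparser`:

```python
# Hypothetical [Metadata] section; mimics ConfigSectionMap(config, "Metadata"),
# which the new helper uses to return a dict of the section (or None if absent).
from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[Metadata]
operator = jdoe
temperature_c = 37
""")

metadata = dict(config["Metadata"]) if config.has_section("Metadata") else None
print(metadata)  # {'operator': 'jdoe', 'temperature_c': '37'}
```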
@@ -957,6 +971,7 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
     cell_types = get_experiment_cell_types(experiment)
     antibodies = get_experiment_antibodies(experiment)
     pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
+    metadata = get_experiment_metadata(experiment)  # None or dict of metadata
     well_labels = _extract_labels_from_config(config, len(wells))
 
     well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
@@ -1011,15 +1026,23 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
             df_pos['antibody'] = well_antibody
             df_pos['cell_type'] = well_cell_type
             df_pos['pharmaceutical_agent'] = well_pharmaceutical_agent
+            if metadata is not None:
+                keys = list(metadata.keys())
+                for k in keys:
+                    df_pos[k] = metadata[k]
 
             df.append(df_pos)
             any_table = True
 
-            df_pos_info.append(
-                {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
-                 'stack_path': stack_path,
-                 'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
-                 'well_number': well_number, 'well_alias': well_alias})
+            pos_dict = {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
+                        'stack_path': stack_path, 'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
+                        'well_number': well_number, 'well_alias': well_alias}
+            # if metadata is not None:
+            #     keys = list(metadata.keys())
+            #     for k in keys:
+            #         pos_dict.update({k: metadata[k]})
+
+            df_pos_info.append(pos_dict)
 
             real_pos_index += 1
 
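The broadcast in the hunk above is plain pandas scalar assignment; a toy sketch with hypothetical metadata keys:

```python
# Each metadata value becomes a constant column on the per-position table,
# mirroring the df_pos[k] = metadata[k] loop added above (toy data).
import pandas as pd

df_pos = pd.DataFrame({"TRACK_ID": [0, 0, 1], "FRAME": [0, 1, 0]})
metadata = {"operator": "jdoe", "temperature_c": "37"}  # hypothetical keys

if metadata is not None:
    for k in metadata.keys():
        df_pos[k] = metadata[k]  # scalar assignment broadcasts to every row

print(df_pos.columns.tolist())  # ['TRACK_ID', 'FRAME', 'operator', 'temperature_c']
```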
@@ -2851,7 +2874,7 @@ def locate_segmentation_model(name):
 
     main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0],"celldetective"])
     modelpath = os.sep.join([main_dir, "models", "segmentation*"]) + os.sep
-    print(f'Looking for {name} in {modelpath}')
+    #print(f'Looking for {name} in {modelpath}')
     models = glob(modelpath + f'*{os.sep}')
 
     match = None
celldetective/measure.py CHANGED
@@ -24,6 +24,9 @@ import celldetective.extra_properties as extra_properties
 from celldetective.extra_properties import *
 from inspect import getmembers, isfunction
 from skimage.morphology import disk
+from scipy.signal import find_peaks, peak_widths
+
+from celldetective.segmentation import filter_image
 
 abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
 
@@ -341,7 +344,9 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
         for index, channel in enumerate(channels):
             if channel == spot_detection['channel']:
                 ind = index
-        df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind)
+        if "image_preprocessing" not in spot_detection:
+            spot_detection.update({'image_preprocessing': None})
+        df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind, image_preprocessing=spot_detection['image_preprocessing'])
 
     if normalisation_list:
         for norm in normalisation_list:
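The two added lines are a backward-compatibility shim: measurement instruction files written before this release have no `image_preprocessing` key. A sketch with hypothetical instruction values:

```python
# Instructions loaded from an older JSON file lack 'image_preprocessing';
# defaulting it to None keeps the new blob_detection call satisfied.
spot_detection = {"channel": "spots", "diameter": 4, "threshold": 0.05}

if "image_preprocessing" not in spot_detection:
    spot_detection.update({"image_preprocessing": None})

assert spot_detection["image_preprocessing"] is None
```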
@@ -919,12 +924,15 @@ def normalise_by_cell(image, labels, distance=5, model='median', operation='subt
     return normalised_frame
 
 
-def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
+def extract_blobs_in_image(image, label, diameter, threshold=0., method="log", image_preprocessing=None):
 
     if np.percentile(image.flatten(),99.9)==0.0:
         return None
 
-    dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
+    if isinstance(image_preprocessing, (list, np.ndarray)):
+        image = filter_image(image.copy(), filters=image_preprocessing)  # apply prefiltering to images before spot detection
+
+    dilated_image = ndimage.grey_dilation(label, footprint=disk(int(1.2*diameter)))  # dilation larger than spot diameter to be safe
 
     masked_image = image.copy()
     masked_image[np.where((dilated_image == 0)|(image!=image))] = 0
@@ -933,7 +941,8 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
     if method=="dog":
         blobs = blob_dog(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
     elif method=="log":
-        blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+        blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+
     # Exclude spots outside of cell masks
     mask = np.array([label[int(y), int(x)] != 0 for y, x, _ in blobs])
     if np.any(mask):
@@ -944,14 +953,15 @@ def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
     return blobs_filtered
 
 
-def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log"):
+def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log", image_preprocessing=None):
+
     image = image[:, :, target_channel].copy()
     if np.percentile(image.flatten(),99.9)==0.0:
         return None
 
     detections = []
-    blobs_filtered = extract_blobs_in_image(image, label, diameter, threshold=threshold)
+    blobs_filtered = extract_blobs_in_image(image, label, diameter, method=method, threshold=threshold, image_preprocessing=image_preprocessing)
 
     for lbl in np.unique(label):
         if lbl>0:
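A sketch of calling the extended `blob_detection` (the signature is taken from the diff; running it requires celldetective installed). The content of an `image_preprocessing` list is whatever celldetective's `filter_image` accepts, which is not documented here, so the example keeps it at `None`:

```python
import numpy as np
from celldetective.measure import blob_detection  # assumes celldetective is installed

img = np.random.rand(256, 256, 1).astype(np.float32)  # (H, W, C) frame
labels = np.zeros((256, 256), dtype=int)
labels[100:140, 100:140] = 1                           # one toy cell mask

detections = blob_detection(
    img, labels,
    diameter=4, threshold=0.05,
    channel_name="spots", target_channel=0,
    method="log",
    image_preprocessing=None,  # or a filter list understood by filter_image
)
```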
@@ -976,69 +986,6 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ
 
     return detections
 
-
-# def blob_detectionv0(image, label, threshold, diameter):
-#     """
-#     Perform blob detection on an image based on labeled regions.
-
-#     Parameters:
-#     - image (numpy.ndarray): The input image data.
-#     - label (numpy.ndarray): An array specifying labeled regions in the image.
-#     - threshold (float): The threshold value for blob detection.
-#     - diameter (float): The expected diameter of blobs.
-
-#     Returns:
-#     - dict: A dictionary containing information about detected blobs.
-
-#     This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
-#     and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
-#     based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
-#     detected blobs and their mean intensity for each labeled region.
-
-#     Example:
-#     >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-#     >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
-#     >>> threshold = 0.1
-#     >>> diameter = 5.0
-#     >>> result = blob_detection(image, label, threshold, diameter)
-#     >>> print(result)
-#     {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
-
-#     Note:
-#     - Blobs are detected using the Difference of Gaussians (DoG) method.
-#     - Detected blobs are filtered based on the specified threshold and expected diameter.
-#     - The returned dictionary contains information about the number of detected blobs and their mean intensity
-#       for each labeled region.
-#     """
-#     blob_labels = {}
-#     dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
-#     for mask_index in np.unique(label):
-#         if mask_index == 0:
-#             continue
-#         removed_background = image.copy()
-#         one_mask = label.copy()
-#         one_mask[np.where(label != mask_index)] = 0
-#         dilated_copy = dilated_image.copy()
-#         dilated_copy[np.where(dilated_image != mask_index)] = 0
-#         removed_background[np.where(dilated_copy == 0)] = 0
-#         min_sigma = (1 / (1 + math.sqrt(2))) * diameter
-#         max_sigma = math.sqrt(2) * min_sigma
-#         blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
-#                          max_sigma=max_sigma)
-
-#         mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
-#         if not np.any(mask):
-#             continue
-#         blobs_filtered = blobs[mask]
-#         binary_blobs = np.zeros_like(label)
-#         for blob in blobs_filtered:
-#             y, x, r = blob
-#             rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
-#             binary_blobs[rr, cc] = 1
-#         spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
-#         blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
-#     return blob_labels
-
 
 ### Classification ####
 
 def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):
@@ -1126,7 +1073,7 @@ def estimate_time(df, class_attr, model='step_function', class_of_interest=[2],
 
     return df
 
 
-def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False,r2_threshold=0.5, percentile_recovery=50, pre_event=None):
+def interpret_track_classification(df, class_attr, irreversible_event=False, unique_state=False, transient_event=False, r2_threshold=0.5, percentile_recovery=50, pre_event=None):
 
     """
     Interpret and classify tracked cells based on their status signals.
@@ -1191,8 +1138,80 @@ def interpret_track_classification(df, class_attr, irreversible_event=False, uni
 
         df = classify_unique_states(df, class_attr, percentile=50, pre_event=pre_event)
 
+    elif transient_event:
+
+        df = classify_transient_events(df, class_attr, pre_event=pre_event)
+
+    return df
+
+
+def classify_transient_events(data, class_attr, pre_event=None):
+
+    df = data.copy()
+    cols = list(df.columns)
+
+    # Control input
+    assert 'TRACK_ID' in cols,'Please provide tracked data...'
+    if 'position' in cols:
+        sort_cols = ['position', 'TRACK_ID']
+        df = df.sort_values(by=sort_cols+['FRAME'])
+    else:
+        sort_cols = ['TRACK_ID']
+        df = df.sort_values(by=sort_cols+['FRAME'])
+    if pre_event is not None:
+        assert 't_'+pre_event in cols,"Pre-event time does not seem to be a valid column in the DataFrame..."
+        assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
+
+    stat_col = class_attr.replace('class','status')
+
+    for tid,track in df.groupby(sort_cols):
+
+        indices = track[class_attr].index
+
+        if pre_event is not None:
+
+            if track['class_'+pre_event].values[0]==1:
+                df.loc[indices, class_attr] = np.nan
+                df.loc[indices, stat_col] = np.nan
+                continue
+            else:
+                # pre-event took place (if left-censored took place at time -1)
+                t_pre_event = track['t_'+pre_event].values[0]
+                indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+                df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
+                track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
+
+        status = track[stat_col].to_numpy()
+        timeline = track['FRAME'].to_numpy()
+        timeline_safe = timeline[status==status]
+        status_safe = list(status[status==status])
+
+        peaks, _ = find_peaks(status_safe)
+        widths, _, left, right = peak_widths(status_safe, peaks, rel_height=1)
+        minimum_weight = 0
+
+        if len(peaks)>0:
+            idx = np.argmax(widths)
+            peak = peaks[idx]; width = widths[idx];
+            if width >= minimum_weight:
+                left = left[idx]; right = right[idx];
+                left = timeline_safe[int(left)]; right = timeline_safe[int(right)];
+
+                df.loc[indices, class_attr] = 0
+                df.loc[indices, class_attr.replace('class_','t_')] = left + (right - left)/2.0
+            else:
+                df.loc[indices, class_attr] = 1
+                df.loc[indices, class_attr.replace('class_','t_')] = -1
+        else:
+            df.loc[indices, class_attr] = 1
+            df.loc[indices, class_attr.replace('class_','t_')] = -1
+
+
+    print("Classes: ",df.loc[df['FRAME']==0,class_attr].value_counts())
+
     return df
 
+
 def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_recovery=50, pre_event=None):
 
     """
@@ -1246,62 +1265,41 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
 
     stat_col = class_attr.replace('class','status')
 
-    if pre_event is not None:
-
-        # Version with pre event; intuition: mask status value before pre-event takes place with NaN
-        for tid, track in df.groupby(sort_cols):
-
-            indices = track[class_attr].index
+    for tid,track in df.groupby(sort_cols):
 
+        indices = track[class_attr].index
+
+        if pre_event is not None:
             if track['class_'+pre_event].values[0]==1:
-                # Pre-event never took place, all NaN
                 df.loc[indices, class_attr] = np.nan
                 df.loc[indices, stat_col] = np.nan
+                continue
             else:
                 # pre-event took place (if left-censored took place at time -1)
                 t_pre_event = track['t_'+pre_event].values[0]
                 indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
                 df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
                 track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
-
-            # The non-NaN part of track (post pre-event)
-            track_valid = track.dropna(subset=stat_col, inplace=False)
-            status_values = track_valid[stat_col].to_numpy()
-
-            if np.all([s==0 for s in status_values]):
-                # all negative to condition, event not observed
-                df.loc[indices, class_attr] = 1
-            elif np.all([s==1 for s in status_values]):
-                # all positive, event already observed (left-censored)
-                df.loc[indices, class_attr] = 2
-            else:
-                # ambiguity, possible transition, use `unique_state` technique after
-                df.loc[indices, class_attr] = 2
-    else:
-        for tid,track in df.groupby(sort_cols):
-
-            # Set status to 0.0 before first detection
+        else:
+            # set state to 0 before first detection
             t_firstdetection = track['t_firstdetection'].values[0]
             indices_pre_detection = track.loc[track['FRAME']<=t_firstdetection,class_attr].index
             track.loc[indices_pre_detection,stat_col] = 0.0
             df.loc[indices_pre_detection,stat_col] = 0.0
-
-            track_valid = track.dropna(subset=stat_col)
-
-            indices = track[class_attr].index
-            status_values = track_valid[stat_col].to_numpy()
-
-            if np.all([s==0 for s in status_values]):
-                # all negative, no event
-                df.loc[indices, class_attr] = 1
-
-            elif np.all([s==1 for s in status_values]):
-                # all positive, event already observed
-                df.loc[indices, class_attr] = 2
-                #df.loc[indices, class_attr.replace('class','status')] = 2
-            else:
-                # ambiguity, possible transition
-                df.loc[indices, class_attr] = 2
+
+        # The non-NaN part of track (post pre-event)
+        track_valid = track.dropna(subset=stat_col, inplace=False)
+        status_values = track_valid[stat_col].to_numpy()
+
+        if np.all([s==0 for s in status_values]):
+            # all negative to condition, event not observed
+            df.loc[indices, class_attr] = 1
+        elif np.all([s==1 for s in status_values]):
+            # all positive, event already observed (left-censored)
+            df.loc[indices, class_attr] = 2
+        else:
+            # ambiguity, possible transition, use `unique_state` technique after
+            df.loc[indices, class_attr] = 2
 
     print("Classes after initial pass: ",df.loc[df['FRAME']==0,class_attr].value_counts())
 
@@ -1316,6 +1314,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
 
     return df
 
+
 def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
 
     """
@@ -1364,62 +1363,38 @@ def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
         assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
 
     stat_col = class_attr.replace('class','status')
+
+    for tid, track in df.groupby(sort_cols):
 
-    if pre_event is not None:
-
-        for tid, track in df.groupby(sort_cols):
-
-            indices = track[class_attr].index
+        indices = track[class_attr].index
 
+        if pre_event is not None:
             if track['class_'+pre_event].values[0]==1:
-                # then pre event not satisfied, class/status is NaN
                 df.loc[indices, class_attr] = np.nan
                 df.loc[indices, stat_col] = np.nan
                 df.loc[indices, stat_col.replace('status_','t_')] = -1
+                continue
             else:
-                # Pre event might happen, set to NaN observations before pre event
                 t_pre_event = track['t_'+pre_event].values[0]
-                indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
+                indices_pre = track.loc[track['FRAME']<=t_pre_event, class_attr].index
                 df.loc[indices_pre, stat_col] = np.nan
                 track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
 
-            # Post pre-event track
-            track_valid = track.dropna(subset=stat_col, inplace=False)
-            status_values = track_valid[stat_col].to_numpy()
-
-            frames = track_valid['FRAME'].to_numpy() # from t_pre-event to T
-            t_first = track['t_firstdetection'].to_numpy()[0]
-            perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
-
-            if perc_status==perc_status:
-                c = ceil(perc_status)
-                if c==0:
-                    df.loc[indices, class_attr] = 1
-                    df.loc[indices, class_attr.replace('class','t')] = -1
-                elif c==1:
-                    df.loc[indices, class_attr] = 2
-                    df.loc[indices, class_attr.replace('class','t')] = -1
-    else:
-        for tid,track in df.groupby(sort_cols):
-
-            track_valid = track.dropna(subset=stat_col)
-            indices_valid = track_valid[class_attr].index
-
-            indices = track[class_attr].index
-            status_values = track_valid[stat_col].to_numpy()
-
-            frames = track_valid['FRAME'].to_numpy()
-            t_first = track['t_firstdetection'].to_numpy()[0]
-            perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
-
-            if perc_status==perc_status:
-                c = ceil(perc_status)
-                if c==0:
-                    df.loc[indices, class_attr] = 1
-                    df.loc[indices, class_attr.replace('class','t')] = -1
-                elif c==1:
-                    df.loc[indices, class_attr] = 2
-                    df.loc[indices, class_attr.replace('class','t')] = -1
+        # Post pre-event track
+        track_valid = track.dropna(subset=stat_col, inplace=False)
+        status_values = track_valid[stat_col].to_numpy()
+        frames = track_valid['FRAME'].to_numpy()
+        t_first = track['t_firstdetection'].to_numpy()[0]
+        perc_status = np.nanpercentile(status_values[frames>=t_first], percentile)
+
+        if perc_status==perc_status:
+            c = ceil(perc_status)
+            if c==0:
+                df.loc[indices, class_attr] = 1
+                df.loc[indices, class_attr.replace('class','t')] = -1
+            elif c==1:
+                df.loc[indices, class_attr] = 2
+                df.loc[indices, class_attr.replace('class','t')] = -1
     return df
 
 def classify_cells_from_query(df, status_attr, query):
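After the refactor, both branches of `classify_unique_states` share one percentile rule: the ceiling of a percentile of the post-detection status values decides the class. A toy check of that rule:

```python
import numpy as np
from math import ceil

status_values = np.array([0, 0, 1, 1, 1])          # toy post-detection statuses
perc_status = np.nanpercentile(status_values, 50)  # median = 1.0
if perc_status == perc_status:                     # NaN-safe guard, as above
    c = ceil(perc_status)
    print(2 if c == 1 else 1)  # c==1 -> class 2 (event observed), c==0 -> class 1
```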
@@ -5,7 +5,7 @@ Copright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.
 import argparse
 import os
 import json
-from celldetective.io import auto_load_number_of_frames, load_frames, fix_missing_labels, locate_labels
+from celldetective.io import auto_load_number_of_frames, load_frames, fix_missing_labels, locate_labels, extract_position_name
 from celldetective.utils import extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, extract_experiment_channels
 from celldetective.utils import remove_redundant_features, remove_trajectory_measurements
 from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity
@@ -50,7 +50,10 @@ parent1 = Path(pos).parent
 expfolder = parent1.parent
 config = PurePath(expfolder,Path("config.ini"))
 assert os.path.exists(config),'The configuration file for the experiment could not be located. Abort.'
+
+print(f"Position: {extract_position_name(pos)}...")
 print("Configuration file: ",config)
+print(f"Population: {mode}...")
 
 # from exp config fetch spatial calib, channel names
 movie_prefix = ConfigSectionMap(config,"MovieSettings")["movie_prefix"]
@@ -62,11 +65,15 @@ nbr_channels = len(channel_names)
 
 # from tracking instructions, fetch btrack config, features, haralick, clean_traj, idea: fetch custom timeline?
 instr_path = PurePath(expfolder,Path(f"{instruction_file}"))
+print('Looking for measurement instruction file...')
+
 if os.path.exists(instr_path):
-    print(f"Tracking instructions for the {mode} population has been successfully located.")
+
     with open(instr_path, 'r') as f:
         instructions = json.load(f)
-    print("Reading the following instructions: ", instructions)
+    print(f"Measurement instruction file successfully loaded...")
+    print(f"Instructions: {instructions}...")
+
     if 'background_correction' in instructions:
         background_correction = instructions['background_correction']
     else:
@@ -144,7 +151,7 @@ except IndexError:
 # Load trajectories, add centroid if not in trajectory
 trajectories = pos+os.sep.join(['output','tables', table_name])
 if os.path.exists(trajectories):
-    print('trajectory exists...')
+    print('A trajectory table was found...')
     trajectories = pd.read_csv(trajectories)
     if 'TRACK_ID' not in list(trajectories.columns):
         do_iso_intensities = False
@@ -289,18 +296,26 @@ def measure_index(indices):
         measurements_at_t[column_labels['time']] = t
         timestep_dataframes.append(measurements_at_t)
 
+    return
+
+
+print(f"Starting the measurements with {n_threads} thread(s)...")
+
+import concurrent.futures
 
 # Multithreading
 indices = list(range(img_num_channels.shape[1]))
 chunks = np.array_split(indices, n_threads)
-threads = []
-for i in range(n_threads):
-    thread_i = threading.Thread(target=measure_index, args=[chunks[i]])
-    threads.append(thread_i)
-for th in threads:
-    th.start()
-for th in threads:
-    th.join()
+
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    results = executor.map(measure_index, chunks)
+    try:
+        for i,return_value in enumerate(results):
+            print(f"Thread {i} output check: ",return_value)
+    except Exception as e:
+        print("Exception: ", e)
+
+print('Done.')
 
 
 if len(timestep_dataframes)>0:
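The manual `threading.Thread` bookkeeping is replaced by `concurrent.futures.ThreadPoolExecutor`; one practical gain is that iterating the `executor.map` results re-raises exceptions from the workers instead of silently losing them. A minimal sketch of the pattern, with the measurement work stubbed out:

```python
import concurrent.futures
import numpy as np

def measure_index(indices):
    for t in indices:
        pass  # per-frame measurement work goes here
    return    # returns None; errors raised here surface in the loop below

indices = list(range(12))
chunks = np.array_split(indices, 4)  # one chunk of frames per thread

with concurrent.futures.ThreadPoolExecutor() as executor:
    results = executor.map(measure_index, chunks)
    for i, return_value in enumerate(results):
        print(f"Thread {i} output check:", return_value)
```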
@@ -320,7 +335,7 @@ if len(timestep_dataframes)>0:
    df = df.drop(invalid_cols, axis=1)
 
    df.to_csv(pos+os.sep.join(["output", "tables", table_name]), index=False)
-   print(f'Measurements successfully written in table {pos+os.sep.join(["output", "tables", table_name])}')
+   print(f'Measurement table successfully exported in {os.sep.join(["output", "tables"])}...')
    print('Done.')
 else:
    print('No measurement could be performed. Check your inputs.')