celldetective 1.3.6.post1__py3-none-any.whl → 1.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. celldetective/_version.py +1 -1
  2. celldetective/events.py +4 -0
  3. celldetective/filters.py +11 -2
  4. celldetective/gui/InitWindow.py +23 -9
  5. celldetective/gui/control_panel.py +19 -11
  6. celldetective/gui/generic_signal_plot.py +5 -0
  7. celldetective/gui/gui_utils.py +2 -2
  8. celldetective/gui/help/DL-segmentation-strategy.json +17 -17
  9. celldetective/gui/help/Threshold-vs-DL.json +11 -11
  10. celldetective/gui/help/cell-populations.json +5 -5
  11. celldetective/gui/help/exp-structure.json +15 -15
  12. celldetective/gui/help/feature-btrack.json +5 -5
  13. celldetective/gui/help/neighborhood.json +7 -7
  14. celldetective/gui/help/prefilter-for-segmentation.json +7 -7
  15. celldetective/gui/help/preprocessing.json +19 -19
  16. celldetective/gui/help/propagate-classification.json +7 -7
  17. celldetective/gui/neighborhood_options.py +1 -1
  18. celldetective/gui/plot_signals_ui.py +13 -9
  19. celldetective/gui/process_block.py +63 -14
  20. celldetective/gui/retrain_segmentation_model_options.py +21 -8
  21. celldetective/gui/retrain_signal_model_options.py +12 -2
  22. celldetective/gui/signal_annotator.py +9 -0
  23. celldetective/gui/signal_annotator2.py +25 -17
  24. celldetective/gui/styles.py +1 -0
  25. celldetective/gui/tableUI.py +1 -1
  26. celldetective/gui/workers.py +136 -0
  27. celldetective/io.py +54 -28
  28. celldetective/measure.py +112 -14
  29. celldetective/scripts/measure_cells.py +36 -46
  30. celldetective/scripts/segment_cells.py +35 -78
  31. celldetective/scripts/segment_cells_thresholds.py +21 -22
  32. celldetective/scripts/track_cells.py +43 -32
  33. celldetective/segmentation.py +16 -62
  34. celldetective/signals.py +11 -7
  35. celldetective/utils.py +587 -67
  36. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/METADATA +1 -1
  37. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/RECORD +41 -40
  38. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/LICENSE +0 -0
  39. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/WHEEL +0 -0
  40. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/entry_points.txt +0 -0
  41. {celldetective-1.3.6.post1.dist-info → celldetective-1.3.7.dist-info}/top_level.txt +0 -0
celldetective/io.py CHANGED
@@ -19,16 +19,16 @@ from csbdeep.io import save_tiff_imagej_compatible
 import skimage.io as skio
 from skimage.measure import regionprops_table, label
 
-from scipy.ndimage import zoom
 from btrack.datasets import cell_config
 from magicgui import magicgui
 from pathlib import Path, PurePath
 from shutil import copyfile, rmtree
 
-from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
-from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
+from celldetective.utils import _rearrange_multichannel_frame, _fix_no_contrast, zoom_multiframes,ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
+from celldetective.utils import interpolate_nan_multichannel, _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
 
 from stardist import fill_label_holes
+from skimage.transform import resize
 
 
 def extract_experiment_from_well(well_path):
@@ -1190,7 +1190,11 @@ def locate_labels(position, population='target', frames=None):
     elif isinstance(frames, (int,float, np.int_)):
 
         tzfill = str(int(frames)).zfill(4)
-        idx = label_names.index(f"{tzfill}.tif")
+        try:
+            idx = label_names.index(f"{tzfill}.tif")
+        except:
+            idx = -1
+
         if idx==-1:
             labels = None
         else:
@@ -1200,7 +1204,11 @@ def locate_labels(position, population='target', frames=None):
         labels = []
         for f in frames:
             tzfill = str(int(f)).zfill(4)
-            idx = label_names.index(f"{tzfill}.tif")
+            try:
+                idx = label_names.index(f"{tzfill}.tif")
+            except:
+                idx = -1
+
             if idx==-1:
                 labels.append(None)
             else:
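Note: this change guards against missing label frames. `list.index` raises `ValueError` when no file matches, and the new `try`/`except` maps that to the `-1` sentinel that the surrounding code already treats as "no labels for this frame". A minimal standalone sketch of the pattern (file names hypothetical):

    label_names = ["0000.tif", "0002.tif"]  # frame 0001 was never segmented

    def find_label_index(frame: int) -> int:
        tzfill = str(int(frame)).zfill(4)
        try:
            return label_names.index(f"{tzfill}.tif")
        except ValueError:  # missing frame -> sentinel handled downstream
            return -1

    print(find_label_index(1))  # -1, so locate_labels yields None for this frame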
@@ -2842,7 +2850,7 @@ def get_segmentation_models_list(mode='targets', return_path=False):
     return available_models, modelpath
 
 
-def locate_segmentation_model(name):
+def locate_segmentation_model(name, download=True):
 
     """
     Locates a specified segmentation model within the local 'celldetective' directory or
@@ -2874,7 +2882,7 @@ def locate_segmentation_model(name):
     main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0],"celldetective"])
     modelpath = os.sep.join([main_dir, "models", "segmentation*"]) + os.sep
-    print(f'Looking for {name} in {modelpath}')
+    #print(f'Looking for {name} in {modelpath}')
     models = glob(modelpath + f'*{os.sep}')
 
     match = None
@@ -2882,13 +2890,15 @@ def locate_segmentation_model(name):
         if name == m.replace('\\', os.sep).split(os.sep)[-2]:
             match = m
             return match
-    # else no match, try zenodo
-    files, categories = get_zenodo_files()
-    if name in files:
-        index = files.index(name)
-        cat = categories[index]
-        download_zenodo_file(name, os.sep.join([main_dir, cat]))
-        match = os.sep.join([main_dir, cat, name]) + os.sep
+    if download:
+        # else no match, try zenodo
+        files, categories = get_zenodo_files()
+        if name in files:
+            index = files.index(name)
+            cat = categories[index]
+            download_zenodo_file(name, os.sep.join([main_dir, cat]))
+            match = os.sep.join([main_dir, cat, name]) + os.sep
+
     return match
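Note: the new `download` flag makes the Zenodo fallback optional, so callers can probe for a locally installed model without triggering a download. A hedged usage sketch (the model name is hypothetical):

    from celldetective.io import locate_segmentation_model

    # Local lookup only: returns the model directory if installed, else None
    path = locate_segmentation_model("mcf7_nuclei", download=False)
    if path is None:
        # Opt in to fetching the model from Zenodo
        path = locate_segmentation_model("mcf7_nuclei", download=True)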
 
@@ -3306,27 +3316,16 @@ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=fl
3306
3316
  f'Error in loading the frame {img_nums} {e}. Please check that the experiment channel information is consistent with the movie being read.')
3307
3317
  return None
3308
3318
 
3309
- if frames.ndim == 3:
3310
- # Systematically move channel axis to the end
3311
- channel_axis = np.argmin(frames.shape)
3312
- frames = np.moveaxis(frames, channel_axis, -1)
3313
-
3314
- if frames.ndim==2:
3315
- frames = frames[:,:,np.newaxis].astype(float)
3319
+ frames = _rearrange_multichannel_frame(frames)
3316
3320
 
3317
3321
  if normalize_input:
3318
3322
  frames = normalize_multichannel(frames, **normalize_kwargs)
3319
3323
 
3320
3324
  if scale is not None:
3321
- frames = [zoom(frames[:,:,c].copy(), [scale,scale], order=3, prefilter=False) for c in range(frames.shape[-1])]
3322
- frames = np.moveaxis(frames,0,-1)
3325
+ frames = zoom_multiframes(frames, scale)
3323
3326
 
3324
3327
  # add a fake pixel to prevent auto normalization errors on images that are uniform
3325
- # to revisit
3326
- for k in range(frames.shape[2]):
3327
- unique_values = np.unique(frames[:, :, k])
3328
- if len(unique_values) == 1:
3329
- frames[0, 0, k] += 1
3328
+ frames = _fix_no_contrast(frames)
3330
3329
 
3331
3330
  return frames.astype(dtype)
3332
3331
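Note: the deleted inline blocks document what the new `celldetective.utils` helpers are expected to do; a minimal sketch with bodies inferred from the removed code (the actual implementations live in utils.py, which changes by +587 -67 in this release):

    import numpy as np

    def _rearrange_multichannel_frame(frames: np.ndarray) -> np.ndarray:
        # Move the (smallest) channel axis last; promote 2D frames to (H, W, 1)
        if frames.ndim == 3:
            frames = np.moveaxis(frames, np.argmin(frames.shape), -1)
        if frames.ndim == 2:
            frames = frames[:, :, np.newaxis].astype(float)
        return frames

    def _fix_no_contrast(frames: np.ndarray) -> np.ndarray:
        # Nudge one pixel per uniform channel so auto-normalization cannot divide by zero
        for k in range(frames.shape[2]):
            if len(np.unique(frames[:, :, k])) == 1:
                frames[0, 0, k] += 1
        return frames

    frames = _rearrange_multichannel_frame(np.zeros((2, 64, 64)))  # -> (64, 64, 2), channel-last
    frames = _fix_no_contrast(frames)  # uniform channels get one bumped pixel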
 
@@ -3532,6 +3531,33 @@ def extract_experiment_folder_output(experiment_folder, destination_folder):
     for t in tab_path:
         copyfile(t, os.sep.join([output_tables_folder, os.path.split(t)[-1]]))
 
+def _load_frames_to_segment(file, indices, scale_model=None, normalize_kwargs=None):
+
+    frames = load_frames(indices, file, scale=scale_model, normalize_input=True, normalize_kwargs=normalize_kwargs)
+    frames = interpolate_nan_multichannel(frames)
+
+    if np.any(indices==-1):
+        frames[:,:,np.where(indices==-1)[0]] = 0.
+
+    return frames
+
+def _load_frames_to_measure(file, indices):
+    return load_frames(indices, file, scale=None, normalize_input=False)
+
+
+def _check_label_dims(lbl, file=None, template=None):
+
+    if file is not None:
+        template = load_frames(0,file,scale=1,normalize_input=False)
+    elif template is not None:
+        template = template
+    else:
+        return lbl
+
+    if lbl.shape != template.shape[:2]:
+        lbl = resize(lbl, template.shape[:2], order=0)
+    return lbl
+
 
 if __name__ == '__main__':
     control_segmentation_napari("/home/limozin/Documents/Experiments/MinimumJan/W4/401/", prefix='Aligned',
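Note: these helpers centralize the frame-loading conventions shared by the segmentation and measurement scripts. `_check_label_dims` rescales a label mask back to the image grid with nearest-neighbor interpolation (`order=0`), which avoids blending cell IDs. A standalone sketch of the idea (with `preserve_range=True` added here so the toy integer IDs survive skimage's dtype conversion):

    import numpy as np
    from skimage.transform import resize

    lbl = np.zeros((256, 256), dtype=np.uint16)  # mask predicted at model resolution
    lbl[100:120, 100:130] = 3                    # one cell with ID 3
    template = np.zeros((512, 512, 1))           # original image, channel-last

    if lbl.shape != template.shape[:2]:
        lbl = resize(lbl, template.shape[:2], order=0, preserve_range=True).astype(np.uint16)

    print(lbl.shape, np.unique(lbl))  # (512, 512) [0 3]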
celldetective/measure.py CHANGED
@@ -311,7 +311,7 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
 
     if isinstance(features, list):
         features = features.copy()
-
+
     if features is None:
         features = []
 
@@ -986,6 +986,69 @@ def blob_detection(image, label, diameter, threshold=0., channel_name=None, targ
 
     return detections
 
+
+# def blob_detectionv0(image, label, threshold, diameter):
+#     """
+#     Perform blob detection on an image based on labeled regions.
+
+#     Parameters:
+#     - image (numpy.ndarray): The input image data.
+#     - label (numpy.ndarray): An array specifying labeled regions in the image.
+#     - threshold (float): The threshold value for blob detection.
+#     - diameter (float): The expected diameter of blobs.
+
+#     Returns:
+#     - dict: A dictionary containing information about detected blobs.
+
+#     This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
+#     and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
+#     based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
+#     detected blobs and their mean intensity for each labeled region.
+
+#     Example:
+#     >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+#     >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
+#     >>> threshold = 0.1
+#     >>> diameter = 5.0
+#     >>> result = blob_detection(image, label, threshold, diameter)
+#     >>> print(result)
+#     {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
+
+#     Note:
+#     - Blobs are detected using the Difference of Gaussians (DoG) method.
+#     - Detected blobs are filtered based on the specified threshold and expected diameter.
+#     - The returned dictionary contains information about the number of detected blobs and their mean intensity
+#       for each labeled region.
+#     """
+#     blob_labels = {}
+#     dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
+#     for mask_index in np.unique(label):
+#         if mask_index == 0:
+#             continue
+#         removed_background = image.copy()
+#         one_mask = label.copy()
+#         one_mask[np.where(label != mask_index)] = 0
+#         dilated_copy = dilated_image.copy()
+#         dilated_copy[np.where(dilated_image != mask_index)] = 0
+#         removed_background[np.where(dilated_copy == 0)] = 0
+#         min_sigma = (1 / (1 + math.sqrt(2))) * diameter
+#         max_sigma = math.sqrt(2) * min_sigma
+#         blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
+#                          max_sigma=max_sigma)
+
+#         mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
+#         if not np.any(mask):
+#             continue
+#         blobs_filtered = blobs[mask]
+#         binary_blobs = np.zeros_like(label)
+#         for blob in blobs_filtered:
+#             y, x, r = blob
+#             rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
+#             binary_blobs[rr, cc] = 1
+#         spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
+#         blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
+#     return blob_labels
+
 ### Classification ####
 
 def estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=0.5):
@@ -1163,13 +1226,15 @@ def classify_transient_events(data, class_attr, pre_event=None):
1163
1226
  assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
1164
1227
 
1165
1228
  stat_col = class_attr.replace('class','status')
1229
+ continuous_stat_col = stat_col.replace('status_','smooth_status_')
1230
+ df[continuous_stat_col] = df[stat_col].copy()
1166
1231
 
1167
1232
  for tid,track in df.groupby(sort_cols):
1168
1233
 
1169
1234
  indices = track[class_attr].index
1170
1235
 
1171
1236
  if pre_event is not None:
1172
-
1237
+
1173
1238
  if track['class_'+pre_event].values[0]==1:
1174
1239
  df.loc[indices, class_attr] = np.nan
1175
1240
  df.loc[indices, stat_col] = np.nan
@@ -1180,7 +1245,8 @@ def classify_transient_events(data, class_attr, pre_event=None):
1180
1245
  indices_pre = track.loc[track['FRAME']<=t_pre_event,class_attr].index
1181
1246
  df.loc[indices_pre, stat_col] = np.nan # set to NaN all statuses before pre-event
1182
1247
  track.loc[track['FRAME']<=t_pre_event, stat_col] = np.nan
1183
-
1248
+ track.loc[track['FRAME']<=t_pre_event, continuous_stat_col] = np.nan
1249
+
1184
1250
  status = track[stat_col].to_numpy()
1185
1251
  timeline = track['FRAME'].to_numpy()
1186
1252
  timeline_safe = timeline[status==status]
@@ -1189,24 +1255,35 @@ def classify_transient_events(data, class_attr, pre_event=None):
1189
1255
  peaks, _ = find_peaks(status_safe)
1190
1256
  widths, _, left, right = peak_widths(status_safe, peaks, rel_height=1)
1191
1257
  minimum_weight = 0
1192
-
1258
+
1193
1259
  if len(peaks)>0:
1194
1260
  idx = np.argmax(widths)
1195
- peak = peaks[idx]; width = widths[idx];
1261
+ peak = peaks[idx]; width = widths[idx];
1196
1262
  if width >= minimum_weight:
1197
1263
  left = left[idx]; right = right[idx];
1198
1264
  left = timeline_safe[int(left)]; right = timeline_safe[int(right)];
1199
-
1265
+
1200
1266
  df.loc[indices, class_attr] = 0
1201
- df.loc[indices, class_attr.replace('class_','t_')] = left + (right - left)/2.0
1267
+ t0 = left #take onset + (right - left)/2.0
1268
+ df.loc[indices, class_attr.replace('class_','t_')] = t0
1269
+ df.loc[track.loc[track[stat_col].isnull(),class_attr].index, continuous_stat_col] = np.nan
1270
+ df.loc[track.loc[track['FRAME']<t0,class_attr].index, continuous_stat_col] = 0
1271
+ df.loc[track.loc[track['FRAME']>=t0,class_attr].index, continuous_stat_col] = 1
1202
1272
  else:
1203
1273
  df.loc[indices, class_attr] = 1
1204
- df.loc[indices, class_attr.replace('class_','t_')] = -1
1274
+ df.loc[indices, class_attr.replace('class_','t_')] = -1
1275
+ df.loc[indices, continuous_stat_col] = 0
1205
1276
  else:
1206
1277
  df.loc[indices, class_attr] = 1
1207
1278
  df.loc[indices, class_attr.replace('class_','t_')] = -1
1208
-
1209
-
1279
+ df.loc[indices, continuous_stat_col] = 0
1280
+
1281
+ # restate NaN for out of scope timepoints
1282
+ df.loc[df[stat_col].isnull(),continuous_stat_col] = np.nan
1283
+ if 'inst_'+stat_col in list(df.columns):
1284
+ df = df.drop(columns=['inst_'+stat_col])
1285
+ df = df.rename(columns={stat_col: 'inst_'+stat_col})
1286
+ df = df.rename(columns={continuous_stat_col: stat_col})
1210
1287
  print("Classes: ",df.loc[df['FRAME']==0,class_attr].value_counts())
1211
1288
 
1212
1289
  return df
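Note: the net effect of the new bookkeeping in classify_transient_events is that the noisy per-frame status is kept under an `inst_` prefix, while a smoothed, single-onset status (0 before t0, 1 from t0 on) takes over the original `status_*` name. A toy illustration of the final renaming step (column names from the diff; data hypothetical):

    import pandas as pd

    df = pd.DataFrame({
        "FRAME": [0, 1, 2, 3],
        "status_event": [0, 1, 0, 1],         # noisy instantaneous status
        "smooth_status_event": [0, 0, 1, 1],  # single onset at t0 = 2
    })

    # same renaming scheme as the diff
    df = df.rename(columns={"status_event": "inst_status_event"})
    df = df.rename(columns={"smooth_status_event": "status_event"})
    print(df.columns.tolist())  # ['FRAME', 'inst_status_event', 'status_event']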
@@ -1286,7 +1363,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
             indices_pre_detection = track.loc[track['FRAME']<=t_firstdetection,class_attr].index
             track.loc[indices_pre_detection,stat_col] = 0.0
             df.loc[indices_pre_detection,stat_col] = 0.0
-
+
         # The non-NaN part of track (post pre-event)
         track_valid = track.dropna(subset=stat_col, inplace=False)
         status_values = track_valid[stat_col].to_numpy()
@@ -1300,7 +1377,7 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
         else:
             # ambiguity, possible transition, use `unique_state` technique after
             df.loc[indices, class_attr] = 2
-
+
     print("Classes after initial pass: ",df.loc[df['FRAME']==0,class_attr].value_counts())
 
     df.loc[df[class_attr]!=2, class_attr.replace('class', 't')] = -1
@@ -1363,7 +1440,7 @@ def classify_unique_states(df, class_attr, percentile=50, pre_event=None):
     assert 'class_'+pre_event in cols,"Pre-event class does not seem to be a valid column in the DataFrame..."
 
     stat_col = class_attr.replace('class','status')
-
+
     for tid, track in df.groupby(sort_cols):
 
         indices = track[class_attr].index
@@ -1488,4 +1565,25 @@ def classify_tracks_from_query(df, event_name, query, irreversible_event=True, u
 
     df = interpret_track_classification(df, class_attr, irreversible_event=irreversible_event, unique_state=unique_state, r2_threshold=r2_threshold, percentile_recovery=percentile_recovery)
 
-    return df
+    return df
+
+def measure_radial_distance_to_center(df, volume, column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}):
+
+    try:
+        df['radial_distance'] = np.sqrt((df[column_labels['x']] - volume[0] / 2) ** 2 + (df[column_labels['y']] - volume[1] / 2) ** 2)
+    except Exception as e:
+        print(f"{e=}")
+
+    return df
+
+def center_of_mass_to_abs_coordinates(df):
+
+    center_of_mass_x_cols = [c for c in list(df.columns) if c.endswith('centre_of_mass_x')]
+    center_of_mass_y_cols = [c for c in list(df.columns) if c.endswith('centre_of_mass_y')]
+    for c in center_of_mass_x_cols:
+        df.loc[:,c.replace('_x','_POSITION_X')] = df[c] + df['POSITION_X']
+    for c in center_of_mass_y_cols:
+        df.loc[:,c.replace('_y','_POSITION_Y')] = df[c] + df['POSITION_Y']
+    df = df.drop(columns = center_of_mass_x_cols+center_of_mass_y_cols)
+
+    return df
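Note: both helpers are factored out of the measurement script (see the measure_cells.py diff below) so other entry points can reuse them. A toy run (data hypothetical) showing what they compute:

    import pandas as pd
    from celldetective.measure import center_of_mass_to_abs_coordinates, measure_radial_distance_to_center

    df = pd.DataFrame({
        "POSITION_X": [10.0], "POSITION_Y": [20.0],
        # per-channel offsets that get added to the cell position
        "ch1_centre_of_mass_x": [1.5], "ch1_centre_of_mass_y": [-2.0],
    })
    df = center_of_mass_to_abs_coordinates(df)  # -> ch1_centre_of_mass_POSITION_X = 11.5, ..._POSITION_Y = 18.0
    df = measure_radial_distance_to_center(df, volume=(512, 512))  # distance to the (256, 256) image center
    print(df["radial_distance"].round(1).tolist())  # [340.9]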
celldetective/scripts/measure_cells.py CHANGED
@@ -5,10 +5,10 @@ Copright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.
 import argparse
 import os
 import json
-from celldetective.io import auto_load_number_of_frames, load_frames, fix_missing_labels, locate_labels
+from celldetective.io import auto_load_number_of_frames, load_frames, fix_missing_labels, locate_labels, extract_position_name
 from celldetective.utils import extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, extract_experiment_channels
-from celldetective.utils import remove_redundant_features, remove_trajectory_measurements
-from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity
+from celldetective.utils import _remove_invalid_cols, remove_redundant_features, remove_trajectory_measurements, _extract_coordinates_from_features
+from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity, center_of_mass_to_abs_coordinates, measure_radial_distance_to_center
 from pathlib import Path, PurePath
 from glob import glob
 from tqdm import tqdm
@@ -16,7 +16,6 @@ import numpy as np
 import pandas as pd
 from natsort import natsorted
 from art import tprint
-import threading
 import datetime
 
 tprint("Measure")
@@ -50,7 +49,10 @@ parent1 = Path(pos).parent
 expfolder = parent1.parent
 config = PurePath(expfolder,Path("config.ini"))
 assert os.path.exists(config),'The configuration file for the experiment could not be located. Abort.'
+
+print(f"Position: {extract_position_name(pos)}...")
 print("Configuration file: ",config)
+print(f"Population: {mode}...")
 
 # from exp config fetch spatial calib, channel names
 movie_prefix = ConfigSectionMap(config,"MovieSettings")["movie_prefix"]
@@ -62,11 +64,15 @@ nbr_channels = len(channel_names)
 
 # from tracking instructions, fetch btrack config, features, haralick, clean_traj, idea: fetch custom timeline?
 instr_path = PurePath(expfolder,Path(f"{instruction_file}"))
+print('Looking for measurement instruction file...')
+
 if os.path.exists(instr_path):
-    print(f"Tracking instructions for the {mode} population has been successfully located.")
+
     with open(instr_path, 'r') as f:
         instructions = json.load(f)
-    print("Reading the following instructions: ", instructions)
+    print(f"Measurement instruction file successfully loaded...")
+    print(f"Instructions: {instructions}...")
+
     if 'background_correction' in instructions:
         background_correction = instructions['background_correction']
     else:
@@ -144,7 +150,7 @@ except IndexError:
 # Load trajectories, add centroid if not in trajectory
 trajectories = pos+os.sep.join(['output','tables', table_name])
 if os.path.exists(trajectories):
-    print('trajectory exists...')
+    print('A trajectory table was found...')
     trajectories = pd.read_csv(trajectories)
     if 'TRACK_ID' not in list(trajectories.columns):
         do_iso_intensities = False
@@ -164,14 +170,6 @@ else:
     features += ['centroid']
     do_iso_intensities = False
 
-    # if 'centroid' not in features:
-    # 	features += ['centroid']
-
-    # if (features is not None) and (trajectories is not None):
-    # 	features = remove_redundant_features(features,
-    # 										trajectories.columns,
-    # 										channel_names=channel_names
-    # 										)
 
 len_movie_auto = auto_load_number_of_frames(file)
 if len_movie_auto is not None:
@@ -229,7 +227,7 @@ with open(pos + f'log_{mode}.json', 'a') as f:
 
 def measure_index(indices):
 
-    global column_labels
+    #global column_labels
 
     for t in tqdm(indices,desc="frame"):
@@ -251,10 +249,7 @@ def measure_index(indices):
                         channels=channel_names, haralick_options=haralick_options, verbose=False,
                         normalisation_list=background_correction, spot_detection=spot_detection)
         if trajectories is None:
-            positions_at_t = feature_table[['centroid-1', 'centroid-0', 'class_id']].copy()
-            positions_at_t['ID'] = np.arange(len(positions_at_t)) # temporary ID for the cells, that will be reset at the end since they are not tracked
-            positions_at_t.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'}, inplace=True)
-            positions_at_t['FRAME'] = int(t)
+            positions_at_t = _extract_coordinates_from_features(feature_table, timepoint=t)
             column_labels = {'track': "ID", 'time': column_labels['time'], 'x': column_labels['x'],
                              'y': column_labels['y']}
         feature_table.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'}, inplace=True)
@@ -271,36 +266,33 @@ def measure_index(indices):
         measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id',suffixes=('_delme', ''))
         measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]
 
-        center_of_mass_x_cols = [c for c in list(measurements_at_t.columns) if c.endswith('centre_of_mass_x')]
-        center_of_mass_y_cols = [c for c in list(measurements_at_t.columns) if c.endswith('centre_of_mass_y')]
-        for c in center_of_mass_x_cols:
-            measurements_at_t.loc[:,c.replace('_x','_POSITION_X')] = measurements_at_t[c] + measurements_at_t['POSITION_X']
-        for c in center_of_mass_y_cols:
-            measurements_at_t.loc[:,c.replace('_y','_POSITION_Y')] = measurements_at_t[c] + measurements_at_t['POSITION_Y']
-        measurements_at_t = measurements_at_t.drop(columns = center_of_mass_x_cols+center_of_mass_y_cols)
-
-        try:
-            measurements_at_t['radial_distance'] = np.sqrt((measurements_at_t[column_labels['x']] - img.shape[0] / 2) ** 2 + (
-                    measurements_at_t[column_labels['y']] - img.shape[1] / 2) ** 2)
-        except Exception as e:
-            print(f"{e=}")
+        measurements_at_t = center_of_mass_to_abs_coordinates(measurements_at_t)
+        measurements_at_t = measure_radial_distance_to_center(measurements_at_t, volume=img.shape, column_labels=column_labels)
 
         if measurements_at_t is not None:
             measurements_at_t[column_labels['time']] = t
             timestep_dataframes.append(measurements_at_t)
 
+    return
+
+
+print(f"Starting the measurements with {n_threads} thread(s)...")
+
+import concurrent.futures
 
 # Multithreading
 indices = list(range(img_num_channels.shape[1]))
 chunks = np.array_split(indices, n_threads)
-threads = []
-for i in range(n_threads):
-    thread_i = threading.Thread(target=measure_index, args=[chunks[i]])
-    threads.append(thread_i)
-for th in threads:
-    th.start()
-for th in threads:
-    th.join()
+
+with concurrent.futures.ThreadPoolExecutor() as executor:
+    results = executor.map(measure_index, chunks)
+    try:
+        for i,return_value in enumerate(results):
+            print(f"Thread {i} output check: ",return_value)
+    except Exception as e:
+        print("Exception: ", e)
+
+print('Done.')
 
 
 if len(timestep_dataframes)>0:
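Note: the switch from hand-rolled threading.Thread bookkeeping to concurrent.futures also changes error handling; iterating the executor.map results re-raises any exception thrown inside a worker, which a bare th.join() silently swallowed. A minimal standalone sketch of the pattern (worker body hypothetical):

    import concurrent.futures
    import numpy as np

    def measure_index(indices):
        # stand-in for the per-frame measurement loop; returns None like the script
        for t in indices:
            pass
        return

    indices = list(range(20))
    chunks = np.array_split(indices, 4)  # one chunk per thread

    with concurrent.futures.ThreadPoolExecutor() as executor:
        results = executor.map(measure_index, chunks)
        for i, return_value in enumerate(results):  # re-raises worker exceptions here
            print(f"Thread {i} output check: ", return_value)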
@@ -312,15 +304,13 @@ if len(timestep_dataframes)>0:
         df = df.dropna(subset=[column_labels['track']])
     else:
         df['ID'] = np.arange(len(df))
+        df = df.sort_values(by=[column_labels['time'], 'ID'])
 
     df = df.reset_index(drop=True)
-
-    invalid_cols = [c for c in list(df.columns) if c.startswith('Unnamed')]
-    if len(invalid_cols)>0:
-        df = df.drop(invalid_cols, axis=1)
+    df = _remove_invalid_cols(df)
 
     df.to_csv(pos+os.sep.join(["output", "tables", table_name]), index=False)
-    print(f'Measurements successfully written in table {pos+os.sep.join(["output", "tables", table_name])}')
+    print(f'Measurement table successfully exported in {os.sep.join(["output", "tables"])}...')
     print('Done.')
 else:
     print('No measurement could be performed. Check your inputs.')
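Note: `_remove_invalid_cols` replaces the inline 'Unnamed' column cleanup deleted above; a sketch of its likely behavior, inferred from the removed lines (the actual helper now lives in celldetective.utils per the new import):

    import pandas as pd

    def _remove_invalid_cols(df: pd.DataFrame) -> pd.DataFrame:
        # Drop 'Unnamed: 0'-style columns left behind by an earlier to_csv/read_csv round trip
        invalid_cols = [c for c in df.columns if c.startswith('Unnamed')]
        if len(invalid_cols) > 0:
            df = df.drop(invalid_cols, axis=1)
        return df

    df = pd.DataFrame({"Unnamed: 0": [0, 1], "TRACK_ID": [5, 6]})
    print(_remove_invalid_cols(df).columns.tolist())  # ['TRACK_ID']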