celldetective 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. celldetective/__main__.py +12 -5
  2. celldetective/events.py +28 -2
  3. celldetective/gui/about.py +0 -1
  4. celldetective/gui/analyze_block.py +3 -18
  5. celldetective/gui/btrack_options.py +126 -21
  6. celldetective/gui/classifier_widget.py +68 -107
  7. celldetective/gui/configure_new_exp.py +37 -4
  8. celldetective/gui/control_panel.py +14 -30
  9. celldetective/gui/generic_signal_plot.py +793 -0
  10. celldetective/gui/gui_utils.py +401 -226
  11. celldetective/gui/json_readers.py +0 -2
  12. celldetective/gui/layouts.py +269 -25
  13. celldetective/gui/measurement_options.py +14 -23
  14. celldetective/gui/neighborhood_options.py +6 -16
  15. celldetective/gui/plot_measurements.py +10 -23
  16. celldetective/gui/plot_signals_ui.py +53 -687
  17. celldetective/gui/process_block.py +320 -186
  18. celldetective/gui/retrain_segmentation_model_options.py +30 -47
  19. celldetective/gui/retrain_signal_model_options.py +5 -14
  20. celldetective/gui/seg_model_loader.py +129 -113
  21. celldetective/gui/signal_annotator.py +93 -103
  22. celldetective/gui/signal_annotator2.py +9 -13
  23. celldetective/gui/styles.py +32 -0
  24. celldetective/gui/survival_ui.py +49 -712
  25. celldetective/gui/tableUI.py +4 -39
  26. celldetective/gui/thresholds_gui.py +38 -11
  27. celldetective/gui/viewers.py +6 -7
  28. celldetective/io.py +62 -84
  29. celldetective/measure.py +374 -15
  30. celldetective/models/segmentation_effectors/ricm-bimodal/config_input.json +130 -0
  31. celldetective/models/segmentation_effectors/ricm-bimodal/ricm-bimodal +0 -0
  32. celldetective/models/segmentation_effectors/ricm-bimodal/training_instructions.json +37 -0
  33. celldetective/neighborhood.py +3 -7
  34. celldetective/preprocessing.py +2 -4
  35. celldetective/relative_measurements.py +0 -3
  36. celldetective/scripts/analyze_signals.py +0 -1
  37. celldetective/scripts/measure_cells.py +1 -3
  38. celldetective/scripts/measure_relative.py +1 -2
  39. celldetective/scripts/segment_cells.py +16 -12
  40. celldetective/scripts/segment_cells_thresholds.py +17 -10
  41. celldetective/scripts/track_cells.py +18 -18
  42. celldetective/scripts/train_segmentation_model.py +1 -2
  43. celldetective/scripts/train_signal_model.py +0 -3
  44. celldetective/segmentation.py +1 -1
  45. celldetective/signals.py +20 -8
  46. celldetective/tracking.py +2 -1
  47. celldetective/utils.py +126 -18
  48. {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/METADATA +19 -12
  49. celldetective-1.2.2.dist-info/RECORD +92 -0
  50. {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/WHEEL +1 -1
  51. celldetective-1.2.0.dist-info/RECORD +0 -88
  52. {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/LICENSE +0 -0
  53. {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/entry_points.txt +0 -0
  54. {celldetective-1.2.0.dist-info → celldetective-1.2.2.dist-info}/top_level.txt +0 -0
celldetective/gui/tableUI.py CHANGED
@@ -2,10 +2,9 @@ from PyQt5.QtWidgets import QRadioButton, QButtonGroup, QMainWindow, QTableView,
  from PyQt5.QtCore import Qt, QAbstractTableModel
  import pandas as pd
  import matplotlib.pyplot as plt
- from matplotlib.cm import viridis
  plt.rcParams['svg.fonttype'] = 'none'
  from celldetective.gui.gui_utils import FigureCanvas, center_window
- from celldetective.utils import differentiate_per_track
+ from celldetective.utils import differentiate_per_track, collapse_trajectories_by_status
  import numpy as np
  import seaborn as sns
  import matplotlib.cm as mcm
@@ -335,7 +334,7 @@ class TableUI(QMainWindow, Styles):
  self.tracks = False

  if self.population=='pairs':
- self.groupby_cols = ['position','reference_population', 'neighbor_population','REFERENCE_ID', 'NEIGHBOR_ID', 'FRAME']
+ self.groupby_cols = ['position','reference_population', 'neighbor_population','REFERENCE_ID', 'NEIGHBOR_ID']
  self.tracks = True # for now
  else:
  if 'TRACK_ID' in data.columns:
@@ -959,7 +958,7 @@ class TableUI(QMainWindow, Styles):
  pass

  if self.population=='pairs':
- for col in self.groupby_cols[1:]: #['neighbor_population', 'reference_population', 'NEIGHBOR_ID', 'REFERENCE_ID']
+ for col in reversed(self.groupby_cols): #['neighbor_population', 'reference_population', 'NEIGHBOR_ID', 'REFERENCE_ID']
  if col in group_table:
  first_column = group_table.pop(col)
  group_table.insert(0, col, first_column)
@@ -1005,41 +1004,7 @@ class TableUI(QMainWindow, Styles):

  elif self.per_status_option.isChecked():

- status_of_interest = self.per_status_cb.currentText()
- self.projection_mode = f'{self.status_operation.currentText()} per {status_of_interest}'
- self.data = self.data.dropna(subset=status_of_interest,ignore_index=True)
- unique_statuses = np.unique(self.data[status_of_interest].to_numpy())
-
- df_sections = []
- for s in unique_statuses:
- subtab = self.data.loc[self.data[status_of_interest]==s,:]
- op = getattr(subtab.groupby(self.groupby_cols), self.status_operation.currentText())
- subtab_projected = op(subtab.groupby(self.groupby_cols))
- frame_duration = subtab.groupby(self.groupby_cols).size().to_numpy()
- for c in self.static_columns:
- try:
- subtab_projected[c] = subtab.groupby(self.groupby_cols)[c].apply(lambda x: x.unique()[0])
- except Exception as e:
- print(e)
- pass
- subtab_projected['duration_in_state'] = frame_duration
- df_sections.append(subtab_projected)
-
- group_table = pd.concat(df_sections,axis=0,ignore_index=True)
-
- if self.population=='pairs':
- for col in ['duration_in_state',status_of_interest, 'neighbor_population', 'reference_population', 'NEIGHBOR_ID', 'REFERENCE_ID']:
- first_column = group_table.pop(col)
- group_table.insert(0, col, first_column)
- else:
- for col in ['duration_in_state',status_of_interest,'TRACK_ID']:
- first_column = group_table.pop(col)
- group_table.insert(0, col, first_column)
-
- group_table.pop('FRAME')
- group_table = group_table.sort_values(by=self.groupby_cols + [status_of_interest],ignore_index=True)
- group_table = group_table.reset_index(drop=True)
-
+ group_table = collapse_trajectories_by_status(self.data, status=self.per_status_cb.currentText(),population=self.population, projection=self.status_operation.currentText(), groupby_columns=self.groupby_cols)

  self.subtable = TableUI(group_table,f"Group by tracks: {self.projection_mode}", plot_mode="static")
  self.subtable.show()
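Note on the refactor above: the inline per-status projection was moved into celldetective.utils.collapse_trajectories_by_status. A minimal sketch of what such a helper presumably does, reconstructed from the removed block and the new call site (the body is an assumption, not the shipped implementation, and omits the static-column and pairs-specific handling):

    import numpy as np
    import pandas as pd

    def collapse_trajectories_by_status(data, status=None, population='target',
                                        projection='mean', groupby_columns=None):
        # Drop rows with an undefined status, then collapse each track into one row
        # per (track, status) combination using the requested projection (e.g. 'mean').
        data = data.dropna(subset=[status], ignore_index=True)
        sections = []
        for s in np.unique(data[status].to_numpy()):
            subtab = data.loc[data[status] == s, :]
            grouped = subtab.groupby(groupby_columns)
            projected = getattr(grouped, projection)(numeric_only=True)
            projected['duration_in_state'] = grouped.size().to_numpy()  # frames spent in state s
            projected[status] = s
            sections.append(projected.reset_index())
        out = pd.concat(sections, axis=0, ignore_index=True)
        return out.sort_values(by=groupby_columns + [status], ignore_index=True)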
celldetective/gui/thresholds_gui.py CHANGED
@@ -4,19 +4,15 @@ import skimage
  from PyQt5.QtWidgets import QAction, QMenu, QMainWindow, QMessageBox, QLabel, QWidget, QFileDialog, QHBoxLayout, \
  QGridLayout, QLineEdit, QScrollArea, QVBoxLayout, QComboBox, QPushButton, QApplication, QPushButton, QRadioButton, QButtonGroup
  from PyQt5.QtGui import QDoubleValidator, QIntValidator
- from matplotlib.backends.backend_qt import NavigationToolbar2QT
  from matplotlib.patches import Circle
- from mpl_toolkits.axes_grid1 import make_axes_locatable
  from scipy import ndimage
  from skimage.morphology import disk

  from celldetective.filters import std_filter, gauss_filter
- from celldetective.gui.gui_utils import center_window, FigureCanvas, ListWidget, FilterChoice, color_from_class
+ from celldetective.gui.gui_utils import center_window, FigureCanvas, ListWidget, FilterChoice, color_from_class, help_generic
  from celldetective.utils import get_software_location, extract_experiment_channels, rename_intensity_column, estimate_unreliable_edge
  from celldetective.io import auto_load_number_of_frames, load_frames
- from celldetective.segmentation import threshold_image, identify_markers_from_binary, apply_watershed, \
- segment_frame_from_thresholds
- from scipy.ndimage import binary_fill_holes
+ from celldetective.segmentation import threshold_image, identify_markers_from_binary, apply_watershed
  import scipy.ndimage as ndi
  from PyQt5.QtCore import Qt, QSize
  from glob import glob
@@ -31,7 +27,6 @@ from skimage.measure import regionprops_table
  import json
  import os

- from celldetective.gui.viewers import StackVisualizer
  from celldetective.gui import Styles

  class ThresholdConfigWizard(QMainWindow, Styles):
@@ -50,6 +45,7 @@ class ThresholdConfigWizard(QMainWindow, Styles):
  self.setMinimumHeight(int(0.8 * self.screen_height))
  self.setWindowTitle("Threshold configuration wizard")
  center_window(self)
+ self.setWindowIcon(self.celldetective_icon)
  self._createActions()
  self._createMenuBar()

@@ -150,7 +146,7 @@ class ThresholdConfigWizard(QMainWindow, Styles):

  def populate_left_panel(self):

- self.filters_qlist = ListWidget(self, FilterChoice, [])
+ self.filters_qlist = ListWidget(FilterChoice, [])

  grid_preprocess = QGridLayout()
  grid_preprocess.setContentsMargins(20, 20, 20, 20)
@@ -174,9 +170,17 @@ class ThresholdConfigWizard(QMainWindow, Styles):
  self.add_filter.setIconSize(QSize(20, 20))
  self.add_filter.clicked.connect(self.filters_qlist.addItem)

+ self.help_prefilter_btn = QPushButton()
+ self.help_prefilter_btn.setIcon(icon(MDI6.help_circle,color=self.help_color))
+ self.help_prefilter_btn.setIconSize(QSize(20, 20))
+ self.help_prefilter_btn.clicked.connect(self.help_prefilter)
+ self.help_prefilter_btn.setStyleSheet(self.button_select_all)
+ self.help_prefilter_btn.setToolTip("Help.")
+
  # filter_list_option_grid.addWidget(QLabel(""),90)
  filter_list_option_grid.addWidget(self.delete_filter, 5)
  filter_list_option_grid.addWidget(self.add_filter, 5)
+ filter_list_option_grid.addWidget(self.help_prefilter_btn, 5)

  grid_preprocess.addLayout(filter_list_option_grid, 0, 0, 1, 3)
  grid_preprocess.addWidget(self.filters_qlist, 1, 0, 1, 3)
@@ -259,6 +263,30 @@ class ThresholdConfigWizard(QMainWindow, Styles):
  for p in self.properties_box_widgets:
  p.setEnabled(False)

+ def help_prefilter(self):
+
+ """
+ Helper for prefiltering strategy
+ """
+
+ dict_path = os.sep.join([get_software_location(),'celldetective','gui','help','prefilter-for-segmentation.json'])
+
+ with open(dict_path) as f:
+ d = json.load(f)
+
+ suggestion = help_generic(d)
+ if isinstance(suggestion, str):
+ print(f"{suggestion=}")
+ msgBox = QMessageBox()
+ msgBox.setIcon(QMessageBox.Information)
+ msgBox.setTextFormat(Qt.RichText)
+ msgBox.setText(f"The suggested technique is to {suggestion}.\nSee a tutorial <a href='https://celldetective.readthedocs.io/en/latest/segment.html'>here</a>.")
+ msgBox.setWindowTitle("Info")
+ msgBox.setStandardButtons(QMessageBox.Ok)
+ returnValue = msgBox.exec()
+ if returnValue == QMessageBox.Ok:
+ return None
+
  def generate_marker_contents(self):

  marker_box = QVBoxLayout()
@@ -412,7 +440,6 @@ class ThresholdConfigWizard(QMainWindow, Styles):

  """

- print("this is the loaded position: ", self.pos)
  if isinstance(self.pos, str):
  movies = glob(self.pos + f"movie/{self.parent_window.parent_window.parent_window.movie_prefix}*.tif")

@@ -438,6 +465,7 @@ class ThresholdConfigWizard(QMainWindow, Styles):
  self.close()
  else:
  self.stack_path = movies[0]
+ print(f'Attempt to read stack {os.path.split(self.stack_path)[-1]}')
  self.len_movie = self.parent_window.parent_window.parent_window.len_movie
  len_movie_auto = auto_load_number_of_frames(self.stack_path)
  if len_movie_auto is not None:
@@ -449,8 +477,7 @@ class ThresholdConfigWizard(QMainWindow, Styles):
  self.nbr_channels = len(self.channels)
  self.current_channel = 0
  self.img = load_frames(0, self.stack_path, normalize_input=False)
- print(self.img.shape)
- print(f'{self.stack_path} successfully located.')
+ print(f'Detected image shape: {self.img.shape}...')

  def show_image(self):

celldetective/gui/viewers.py CHANGED
@@ -6,15 +6,14 @@ from celldetective.measure import contour_of_instance_segmentation
  from celldetective.utils import _get_img_num_per_channel, estimate_unreliable_edge
  from tifffile import imread
  import matplotlib.pyplot as plt
- from stardist import fill_label_holes
  from pathlib import Path
  from natsort import natsorted
  from glob import glob
  import os

- from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel, QComboBox, QLineEdit, QListWidget
+ from PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLabel, QComboBox, QLineEdit, QListWidget
  from PyQt5.QtCore import Qt, QSize
- from celldetective.gui.gui_utils import FigureCanvas, QuickSliderLayout, center_window
+ from celldetective.gui.gui_utils import FigureCanvas, center_window, QuickSliderLayout
  from celldetective.gui import Styles
  from superqt import QLabeledDoubleSlider, QLabeledSlider, QLabeledDoubleRangeSlider
  from superqt.fonticon import icon
@@ -89,9 +88,9 @@ class StackVisualizer(QWidget, Styles):
  if self.create_frame_slider:
  self.generate_frame_slider()

- center_window(self)
  self.canvas.layout.setContentsMargins(15,15,15,30)
  self.setAttribute(Qt.WA_DeleteOnClose)
+ center_window(self)

  def show(self):
  # Display the widget
@@ -142,7 +141,7 @@ class StackVisualizer(QWidget, Styles):
  def generate_figure_canvas(self):
  # Generate the figure canvas for displaying images

- self.fig, self.ax = plt.subplots(tight_layout=True) #figsize=(5, 5)
+ self.fig, self.ax = plt.subplots(figsize=(5,5),tight_layout=True) #figsize=(5, 5)
  self.canvas = FigureCanvas(self.fig, title=self.window_title, interactive=True)
  self.ax.clear()
  self.im = self.ax.imshow(self.init_frame, cmap='gray', interpolation='none', **self.imshow_kwargs)
@@ -197,13 +196,13 @@ class StackVisualizer(QWidget, Styles):
  contrast_layout = QuickSliderLayout(
  label='Contrast: ',
  slider=self.contrast_slider,
- slider_initial_value=[np.nanpercentile(self.init_frame, 1),np.nanpercentile(self.init_frame, 99.99)],
+ slider_initial_value=[np.nanpercentile(self.init_frame, 0.1),np.nanpercentile(self.init_frame, 99.99)],
  slider_range=(np.nanmin(self.init_frame),np.nanmax(self.init_frame)),
  decimal_option=True,
  precision=1.0E-05,
  )
  contrast_layout.setContentsMargins(15,0,15,0)
- self.im.set_clim(vmin=np.nanpercentile(self.init_frame, 1),vmax=np.nanpercentile(self.init_frame, 99.99))
+ self.im.set_clim(vmin=np.nanpercentile(self.init_frame, 0.1),vmax=np.nanpercentile(self.init_frame, 99.99))
  self.contrast_slider.valueChanged.connect(self.change_contrast)
  self.canvas.layout.addLayout(contrast_layout)

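Note on the StackVisualizer change above: the initial contrast window now spans the 0.1th to 99.99th intensity percentiles instead of 1st to 99.99th, so fewer dim pixels are clipped at startup. A minimal, self-contained illustration of this clipping pattern on a synthetic image (not celldetective code):

    import numpy as np
    import matplotlib.pyplot as plt

    frame = np.random.gamma(shape=2.0, scale=100.0, size=(256, 256))  # synthetic image
    fig, ax = plt.subplots(figsize=(5, 5), tight_layout=True)
    im = ax.imshow(frame, cmap='gray', interpolation='none')
    # Robust display range: NaN-safe percentiles trim outliers at both ends.
    im.set_clim(vmin=np.nanpercentile(frame, 0.1), vmax=np.nanpercentile(frame, 99.99))
    plt.show()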
celldetective/io.py CHANGED
@@ -17,15 +17,11 @@ from pathlib import Path, PurePath
  from shutil import copyfile
  from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
  import json
- import threading
  from skimage.measure import regionprops_table
  from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
- import matplotlib.pyplot as plt
- from celldetective.filters import std_filter, median_filter, gauss_filter
- from stardist import fill_label_holes
  from celldetective.utils import interpolate_nan
- from scipy.interpolate import griddata
-
+ import concurrent.futures
+ from tifffile import imwrite


  def get_experiment_wells(experiment):
@@ -628,7 +624,7 @@ def locate_stack(position, prefix='Aligned'):

  stack_path = glob(position + os.sep.join(['movie', f'{prefix}*.tif']))
  assert len(stack_path) > 0, f"No movie with prefix {prefix} found..."
- stack = imread(stack_path[0].replace('\\', '/'), is_mmstack=False)
+ stack = imread(stack_path[0].replace('\\', '/'))
  if stack.ndim == 4:
  stack = np.moveaxis(stack, 1, -1)
  elif stack.ndim == 3:
@@ -681,6 +677,49 @@ def locate_labels(position, population='target'):

  return labels

+ def fix_missing_labels(position, population='target', prefix='Aligned'):
+
+ """
+ Fix missing label files by creating empty label images for frames that do not have corresponding label files.
+
+ This function locates missing label files in a sequence of frames and creates empty labels (filled with zeros)
+ for the frames that are missing. The function works for two types of populations: 'target' or 'effector'.
+
+ Parameters
+ ----------
+ position : str
+ The file path to the folder containing the images/label files. This is the root directory where
+ the label files are expected to be found.
+ population : str, optional
+ Specifies whether to look for 'target' or 'effector' labels. Accepts 'target' or 'effector'
+ as valid values. Default is 'target'.
+ prefix : str, optional
+ The prefix used to locate the image stack (default is 'Aligned').
+
+ Returns
+ -------
+ None
+ The function creates new label files in the corresponding folder for any frames missing label files.
+ """
+
+ if not position.endswith(os.sep):
+ position += os.sep
+
+ stack = locate_stack(position, prefix=prefix)
+ template = np.zeros((stack[0].shape[0], stack[0].shape[1]))
+ all_frames = np.arange(len(stack))
+
+ if population.lower() == "target" or population.lower() == "targets":
+ label_path = natsorted(glob(position + os.sep.join(["labels_targets", "*.tif"])))
+ elif population.lower() == "effector" or population.lower() == "effectors":
+ label_path = natsorted(glob(position + os.sep.join(["labels_effectors", "*.tif"])))
+
+ path = os.path.split(label_path[0])[0]
+ int_valid = [int(lbl.split(os.sep)[-1].split('.')[0]) for lbl in label_path]
+ to_create = [x for x in all_frames if x not in int_valid]
+ to_create = [str(x).zfill(4)+'.tif' for x in to_create]
+ for file in to_create:
+ imwrite(os.sep.join([path, file]), template)


  def locate_stack_and_labels(position, prefix='Aligned', population="target"):
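The new fix_missing_labels helper above backfills empty label frames. A hypothetical usage sketch based on its signature and docstring (the position path is purely illustrative):

    from celldetective.io import fix_missing_labels

    # Write an all-zero label image for every frame of the 'Aligned' stack
    # that has no file in labels_targets/ (e.g. 0007.tif if frame 7 was never segmented).
    fix_missing_labels('/path/to/experiment/W1/100/', population='target', prefix='Aligned')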
@@ -832,7 +871,6 @@ def auto_load_number_of_frames(stack_path):
  nslices = int(attr[np.argmax([s.startswith("frames") for s in attr])].split("=")[-1])
  if nslices > 1:
  len_movie = nslices
- print(f"Auto-detected movie length movie: {len_movie}")
  else:
  break_the_code()
  except:
@@ -840,7 +878,6 @@
  # try nslices
  frames = int(attr[np.argmax([s.startswith("slices") for s in attr])].split("=")[-1])
  len_movie = frames
- print(f"Auto-detected movie length movie: {len_movie}")
  except:
  pass

@@ -1123,96 +1160,41 @@ def relabel_segmentation(labels, data, properties, column_labels={'track': "trac
  else:
  df = pd.DataFrame(data,columns=[column_labels['track'],column_labels['frame'],'z', column_labels['y'],column_labels['x']])
  df = df.drop(columns=['z'])
+
  df = df.merge(pd.DataFrame(properties),left_index=True, right_index=True)
  df = df.sort_values(by=[column_labels['track'],column_labels['frame']])
+ df.loc[df['dummy'],column_labels['label']] = np.nan

  new_labels = np.zeros_like(labels)

  def rewrite_labels(indices):

  for t in tqdm(indices):
+
  f = int(t)
- tracks_at_t = df.loc[df[column_labels['frame']] == f, column_labels['track']].to_numpy()
- identities = df.loc[df[column_labels['frame']] == f, column_labels['label']].to_numpy()
+ cells = df.loc[df[column_labels['frame']] == f, [column_labels['track'], column_labels['label']]].to_numpy()
+ tracks_at_t = cells[:,0]
+ identities = cells[:,1]

+ # exclude NaN
  tracks_at_t = tracks_at_t[identities == identities]
  identities = identities[identities == identities]

  for k in range(len(identities)):
  loc_i, loc_j = np.where(labels[f] == identities[k])
- new_labels[f, loc_i, loc_j] = int(tracks_at_t[k])
+ new_labels[f, loc_i, loc_j] = round(tracks_at_t[k])

  # Multithreading
  indices = list(df[column_labels['frame']].unique())
  chunks = np.array_split(indices, n_threads)
- threads = []
- for i in range(n_threads):
- thread_i = threading.Thread(target=rewrite_labels, args=[chunks[i]])
- threads.append(thread_i)
- for th in threads:
- th.start()
- for th in threads:
- th.join()

- return new_labels
-
- # def relabel_segmentation(labels, data, properties, column_labels={'track': "track", 'frame': 'frame', 'y': 'y', 'x': 'x', 'label': 'class_id'}, threads=1):
-
- # """
-
- # Relabel the segmentation labels based on the provided tracking data and properties.
-
- # Parameters
- # ----------
- # labels : ndarray
- # The original segmentation labels.
- # data : ndarray
- # The tracking data containing information about tracks, frames, y-coordinates, and x-coordinates.
- # properties : ndarray
- # The properties associated with the tracking data.
- # column_labels : dict, optional
- # A dictionary specifying the column labels for the tracking data. The default is {'track': "track",
- # 'frame': 'frame', 'y': 'y', 'x': 'x', 'label': 'class_id'}.
-
- # Returns
- # -------
- # ndarray
- # The relabeled segmentation labels.
-
- # Notes
- # -----
- # This function relabels the segmentation labels based on the provided tracking data and properties.
- # It creates a DataFrame from the tracking data and properties, merges them based on the indices, and sorts them by track and frame.
- # Then, it iterates over unique frames in the DataFrame, retrieves the tracks and identities at each frame,
- # and updates the corresponding labels with the new track values.
-
- # Examples
- # --------
- # >>> relabeled = relabel_segmentation(labels, data, properties, column_labels={'track': "track", 'frame': 'frame',
- # ... 'y': 'y', 'x': 'x', 'label': 'class_id'})
- # # Relabel the segmentation labels based on the provided tracking data and properties.
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ executor.map(rewrite_labels, chunks)

- # """
-
- # df = pd.DataFrame(data,columns=[column_labels['track'],column_labels['frame'],column_labels['y'],column_labels['x']])
- # df = df.merge(pd.DataFrame(properties),left_index=True, right_index=True)
- # df = df.sort_values(by=[column_labels['track'],column_labels['frame']])
-
- # new_labels = np.zeros_like(labels)
-
- # for t in tqdm(df[column_labels['frame']].unique()):
- # f = int(t)
- # tracks_at_t = df.loc[df[column_labels['frame']]==f, column_labels['track']].to_numpy()
- # identities = df.loc[df[column_labels['frame']]==f, column_labels['label']].to_numpy()
-
- # tracks_at_t = tracks_at_t[identities==identities]
- # identities = identities[identities==identities]
+ print("\nDone.")

- # for k in range(len(identities)):
- # loc_i,loc_j = np.where(labels[f]==identities[k])
- # new_labels[f,loc_i,loc_j] = int(tracks_at_t[k])
+ return new_labels

- # return new_labels


  def control_tracking_btrack(position, prefix="Aligned", population="target", relabel=True, flush_memory=True, threads=1):
@@ -1280,16 +1262,13 @@ def view_on_napari_btrack(data, properties, graph, stack=None, labels=None, rela
  """

  if (labels is not None) * relabel:
- print('Relabeling the cell masks with the track ID.')
+ print('Replacing the cell mask labels with the track ID...')
  labels = relabel_segmentation(labels, data, properties, threads=threads)

- if data.shape[1]==4:
- vertices = data[:, 1:]
- else:
- vertices = data[:, 2:]
+ vertices = data[:, [1,-2,-1]]
+
  viewer = napari.Viewer()
  if stack is not None:
- print(f'{stack.shape=}')
  viewer.add_image(stack, channel_axis=-1, colormap=["gray"] * stack.shape[-1])
  if labels is not None:
  viewer.add_labels(labels, name='segmentation', opacity=0.4)
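Note on the vertices change above: the branching on data.shape[1] is replaced by the single fancy-indexing expression data[:, [1,-2,-1]], which selects the frame column plus the last two (y, x) columns whether or not a z column is present. A tiny numpy illustration with synthetic track rows:

    import numpy as np

    tracks_2d = np.array([[7, 3, 10.0, 20.0]])        # track, frame, y, x
    tracks_3d = np.array([[7, 3, 0.0, 10.0, 20.0]])   # track, frame, z, y, x

    # Columns [1, -2, -1] are (frame, y, x) in both layouts.
    print(tracks_2d[:, [1, -2, -1]])   # [[ 3. 10. 20.]]
    print(tracks_3d[:, [1, -2, -1]])   # [[ 3. 10. 20.]]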
@@ -1297,7 +1276,6 @@ def view_on_napari_btrack(data, properties, graph, stack=None, labels=None, rela
  if data.shape[1]==4:
  viewer.add_tracks(data, properties=properties, graph=graph, name='tracks')
  else:
- print(data)
  viewer.add_tracks(data[:,[0,1,3,4]], properties=properties, graph=graph, name='tracks')
  viewer.show(block=True)

@@ -1616,7 +1594,7 @@ def correct_annotation(filename):
  def save_widget():
  return export_labels()

- img = imread(filename.replace('\\','/'),is_mmstack=False)
+ img = imread(filename.replace('\\','/'))
  if img.ndim==3:
  img = np.moveaxis(img, 0, -1)
  elif img.ndim==2: