celldetective 1.3.6.post2__py3-none-any.whl → 1.3.7.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. celldetective/_version.py +1 -1
  2. celldetective/events.py +4 -0
  3. celldetective/gui/InitWindow.py +23 -9
  4. celldetective/gui/control_panel.py +19 -11
  5. celldetective/gui/generic_signal_plot.py +5 -0
  6. celldetective/gui/help/DL-segmentation-strategy.json +17 -17
  7. celldetective/gui/help/Threshold-vs-DL.json +11 -11
  8. celldetective/gui/help/cell-populations.json +5 -5
  9. celldetective/gui/help/exp-structure.json +15 -15
  10. celldetective/gui/help/feature-btrack.json +5 -5
  11. celldetective/gui/help/neighborhood.json +7 -7
  12. celldetective/gui/help/prefilter-for-segmentation.json +7 -7
  13. celldetective/gui/help/preprocessing.json +19 -19
  14. celldetective/gui/help/propagate-classification.json +7 -7
  15. celldetective/gui/plot_signals_ui.py +13 -9
  16. celldetective/gui/process_block.py +63 -14
  17. celldetective/gui/retrain_segmentation_model_options.py +21 -8
  18. celldetective/gui/retrain_signal_model_options.py +12 -2
  19. celldetective/gui/signal_annotator.py +9 -0
  20. celldetective/gui/signal_annotator2.py +8 -0
  21. celldetective/gui/styles.py +1 -0
  22. celldetective/gui/tableUI.py +1 -1
  23. celldetective/gui/workers.py +136 -0
  24. celldetective/io.py +54 -28
  25. celldetective/measure.py +112 -14
  26. celldetective/scripts/measure_cells.py +10 -35
  27. celldetective/scripts/segment_cells.py +15 -62
  28. celldetective/scripts/segment_cells_thresholds.py +1 -2
  29. celldetective/scripts/track_cells.py +16 -19
  30. celldetective/segmentation.py +16 -62
  31. celldetective/signals.py +11 -7
  32. celldetective/utils.py +587 -67
  33. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/METADATA +1 -1
  34. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/RECORD +38 -37
  35. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/WHEEL +1 -1
  36. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/LICENSE +0 -0
  37. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/entry_points.txt +0 -0
  38. {celldetective-1.3.6.post2.dist-info → celldetective-1.3.7.post1.dist-info}/top_level.txt +0 -0
celldetective/_version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "1.3.6.post2"
1
+ __version__ = "1.3.7.post1"
celldetective/events.py CHANGED
@@ -209,7 +209,11 @@ def compute_survival(df, class_of_interest, t_event, t_reference=None, FrameToMi
209
209
  assert t_reference in cols,"The reference time cannot be found in the dataframe..."
210
210
  first_detections = df.groupby(groupby_cols)[t_reference].max().values
211
211
 
212
+
213
+ print(f"{classes=} {event_times=} {max_times=} {first_detections=}")
212
214
  events, survival_times = switch_to_events(classes, event_times, max_times, origin_times=first_detections, left_censored=left_censored, FrameToMin=FrameToMin, cut_observation_time=cut_observation_time)
215
+ print(f"{events=} {survival_times=}")
216
+
213
217
  ks = KaplanMeierFitter()
214
218
  if len(events)>0:
215
219
  ks.fit(survival_times, event_observed=events)
@@ -1,7 +1,7 @@
1
1
  import os
2
2
 
3
3
  from PyQt5.QtWidgets import QApplication, QMainWindow
4
- from PyQt5.QtWidgets import QFileDialog, QWidget, QVBoxLayout, QCheckBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox, QMenu, QAction
4
+ from PyQt5.QtWidgets import QFileDialog, QDialog, QWidget, QVBoxLayout, QCheckBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox, QMenu, QAction
5
5
  from PyQt5.QtCore import Qt, QUrl
6
6
  from PyQt5.QtGui import QIcon, QDesktopServices, QIntValidator
7
7
 
@@ -20,6 +20,9 @@ from subprocess import check_output, Popen
20
20
  from psutil import cpu_count
21
21
  import json
22
22
 
23
+ from celldetective.gui.processes.downloader import DownloadProcess
24
+ from celldetective.gui.workers import ProgressWindow
25
+
23
26
  class AppInitWindow(QMainWindow, Styles):
24
27
 
25
28
  """
@@ -42,11 +45,11 @@ class AppInitWindow(QMainWindow, Styles):
42
45
  except Exception: # this command not being found can raise quite a few different errors depending on the configuration
43
46
  print('No NVIDIA GPU detected...')
44
47
  self.use_gpu = False
45
-
48
+
46
49
  self.soft_path = software_location
47
50
  self.onlyInt = QIntValidator()
48
51
  self.setWindowIcon(QIcon(os.sep.join([self.soft_path,'celldetective','icons','logo.png'])))
49
- center_window(self)
52
+
50
53
  self._createActions()
51
54
  self._createMenuBar()
52
55
 
@@ -63,6 +66,8 @@ class AppInitWindow(QMainWindow, Styles):
63
66
  self.create_buttons_hbox()
64
67
  self.setCentralWidget(central_widget)
65
68
  self.reload_previous_gpu_threads()
69
+ center_window(self)
70
+
66
71
  self.show()
67
72
 
68
73
  def closeEvent(self, event):
@@ -188,18 +193,27 @@ class AppInitWindow(QMainWindow, Styles):
188
193
  self.openCytotoxicityAssayDemo.triggered.connect(self.download_cytotoxicity_assay_demo)
189
194
 
190
195
  def download_spreading_assay_demo(self):
191
-
196
+
192
197
  self.target_dir = str(QFileDialog.getExistingDirectory(self, 'Select Folder for Download'))
193
198
  if self.target_dir=='':
194
199
  return None
195
-
200
+
196
201
  if not os.path.exists(os.sep.join([self.target_dir,'demo_ricm'])):
197
- download_zenodo_file('demo_ricm', self.target_dir)
202
+ self.output_dir = self.target_dir
203
+ self.file = 'demo_ricm'
204
+ process_args = {"output_dir": self.output_dir, "file": self.file}
205
+ self.job = ProgressWindow(DownloadProcess, parent_window=self, title="Download", position_info=False, process_args=process_args)
206
+ result = self.job.exec_()
207
+ if result == QDialog.Accepted:
208
+ pass
209
+ elif result == QDialog.Rejected:
210
+ return None
211
+ #download_zenodo_file('demo_ricm', self.target_dir)
198
212
  self.experiment_path_selection.setText(os.sep.join([self.target_dir, 'demo_ricm']))
199
213
  self.validate_button.click()
200
214
 
201
215
  def download_cytotoxicity_assay_demo(self):
202
-
216
+
203
217
  self.target_dir = str(QFileDialog.getExistingDirectory(self, 'Select Folder for Download'))
204
218
  if self.target_dir=='':
205
219
  return None
@@ -290,7 +304,7 @@ class AppInitWindow(QMainWindow, Styles):
290
304
 
291
305
 
292
306
  def open_experiment(self):
293
-
307
+
294
308
  self.browse_experiment_folder()
295
309
  if self.experiment_path_selection.text()!='':
296
310
  self.open_directory()
@@ -310,7 +324,7 @@ class AppInitWindow(QMainWindow, Styles):
310
324
  QDesktopServices.openUrl(doc_url)
311
325
 
312
326
  def open_models_folder(self):
313
-
327
+
314
328
  path = os.sep.join([self.soft_path,'celldetective','models',os.sep])
315
329
  try:
316
330
  Popen(f'explorer {os.path.realpath(path)}')
@@ -31,7 +31,6 @@ class ControlPanel(QMainWindow, Styles):
31
31
  self.setWindowTitle("celldetective")
32
32
  self.setWindowIcon(self.celldetective_icon)
33
33
  self.parent_window = parent_window
34
- center_window(self)
35
34
 
36
35
  self.init_wells_and_positions()
37
36
  self.load_configuration()
@@ -92,6 +91,10 @@ class ControlPanel(QMainWindow, Styles):
92
91
  self.screen_height = desktop.screenGeometry().height()
93
92
  self.screen_width = desktop.screenGeometry().width()
94
93
  self.scroll.setMinimumWidth(440)
94
+
95
+ self.setAttribute(Qt.WA_DeleteOnClose)
96
+ center_window(self)
97
+
95
98
 
96
99
  self.well_list.setCurrentIndex(0)
97
100
  #self.position_list.setCurrentIndex(0)
@@ -536,14 +539,14 @@ class ControlPanel(QMainWindow, Styles):
536
539
  # if os.path.exists(os.sep.join([self.pos,'labels_targets', os.sep])):
537
540
  self.ProcessTargets.check_seg_btn.setEnabled(True)
538
541
 
539
- if os.path.exists(os.sep.join([self.pos,'output','tables','napari_target_trajectories.npy'])):
540
- self.ProcessTargets.check_tracking_result_btn.setEnabled(True)
541
- else:
542
- self.ProcessTargets.check_tracking_result_btn.setEnabled(False)
543
- if os.path.exists(os.sep.join([self.pos,'output','tables','napari_effector_trajectories.npy'])):
544
- self.ProcessEffectors.check_tracking_result_btn.setEnabled(True)
545
- else:
546
- self.ProcessEffectors.check_tracking_result_btn.setEnabled(False)
542
+ # if os.path.exists(os.sep.join([self.pos,'output','tables','napari_target_trajectories.npy'])):
543
+ # self.ProcessTargets.check_tracking_result_btn.setEnabled(True)
544
+ # else:
545
+ # self.ProcessTargets.check_tracking_result_btn.setEnabled(False)
546
+ # if os.path.exists(os.sep.join([self.pos,'output','tables','napari_effector_trajectories.npy'])):
547
+ # self.ProcessEffectors.check_tracking_result_btn.setEnabled(True)
548
+ # else:
549
+ # self.ProcessEffectors.check_tracking_result_btn.setEnabled(False)
547
550
 
548
551
  if os.path.exists(os.sep.join([self.pos,'output','tables','trajectories_effectors.csv'])):
549
552
  df = pd.read_csv(os.sep.join([self.pos,'output','tables','trajectories_effectors.csv']), nrows=1)
@@ -553,8 +556,10 @@ class ControlPanel(QMainWindow, Styles):
553
556
  self.ProcessEffectors.check_signals_btn.setEnabled(True)
554
557
  self.ProcessEffectors.delete_tracks_btn.show()
555
558
  self.ProcessEffectors.signal_analysis_action.setEnabled(True)
559
+ self.ProcessEffectors.check_tracking_result_btn.setEnabled(True)
556
560
  else:
557
- self.ProcessEffectors.signal_analysis_action.setEnabled(False)
561
+ self.ProcessEffectors.signal_analysis_action.setEnabled(False)
562
+ self.ProcessEffectors.check_tracking_result_btn.setEnabled(False)
558
563
 
559
564
  #self.ProcessEffectors.signal_analysis_action.setEnabled(True)
560
565
  self.ProcessEffectors.view_tab_btn.setEnabled(True)
@@ -575,9 +580,12 @@ class ControlPanel(QMainWindow, Styles):
575
580
  if id_col=='TRACK_ID':
576
581
  self.ProcessTargets.check_signals_btn.setEnabled(True)
577
582
  self.ProcessTargets.signal_analysis_action.setEnabled(True)
583
+ self.ProcessTargets.check_tracking_result_btn.setEnabled(True)
578
584
  self.ProcessTargets.delete_tracks_btn.show()
579
585
  else:
580
- self.ProcessTargets.signal_analysis_action.setEnabled(False)
586
+ self.ProcessTargets.signal_analysis_action.setEnabled(False)
587
+ self.ProcessTargets.check_tracking_result_btn.setEnabled(False)
588
+
581
589
  #self.ProcessTargets.signal_analysis_action.setEnabled(True)
582
590
  self.ProcessTargets.view_tab_btn.setEnabled(True)
583
591
  self.ProcessTargets.classify_btn.setEnabled(True)
@@ -229,6 +229,11 @@ class GenericSignalPlotWidget(QWidget, Styles):
229
229
  )
230
230
  self.alpha_slider.valueChanged.connect(self.submit_alpha)
231
231
  self.cell_lines_alpha_wdg.setLayout(alpha_hbox)
232
+
233
+ # self.submit_alpha_btn = QPushButton('submit')
234
+ # self.submit_alpha_btn.setStyleSheet(self.button_style_sheet_2)
235
+ # self.submit_alpha_btn.clicked.connect(self.submit_alpha)
236
+ # alpha_hbox.addWidget(self.submit_alpha_btn, 10)
232
237
  self.layout.addWidget(self.cell_lines_alpha_wdg)
233
238
 
234
239
  self.select_option = [QRadioButton() for i in range(2)]
@@ -1,36 +1,36 @@
1
1
  {
2
- "Blob-like cell": {
2
+ "Are the cells in your image blob-like?": {
3
3
  "yes": {
4
- "Fluorescence image": {
4
+ "Do you have a fluorescence image of the cells?": {
5
5
  "yes": {
6
- "Mixture of population": {
7
- "yes": "train custom StarDist model",
6
+ "Is there a mixture of different cell populations in the image?": {
7
+ "yes": "to train custom StarDist model",
8
8
  "no": {
9
- "Cells can be identified from a single channel": {
10
- "yes": "Use StarDist versatile fluorescence",
11
- "no" : "train custom StarDist model"
9
+ "Can the cells be identified using a single channel?": {
10
+ "yes": "to use the StarDist versatile fluorescence generalist model",
11
+ "no" : "to train a custom StarDist model using your multichannel data"
12
12
  }
13
13
  }
14
14
  }
15
15
  },
16
- "no": "train custom StarDist model"
16
+ "no": "to train a custom StarDist model on your data"
17
17
  }
18
18
  },
19
19
  "no": {
20
- "Mixture of population": {
21
- "yes": "train custom cellpose model",
20
+ "Is there a mixture of different cell populations in your image?": {
21
+ "yes": "to train a custom cellpose model with your data, annotating one of the two populations selectively",
22
22
  "no": {
23
- "Heterogeneity in cell sizes": {
24
- "yes": "train custom cellpose model",
23
+ "Is there significant variation in cell sizes?": {
24
+ "yes": "to train a custom cellpose model using your data",
25
25
  "no": {
26
- "Cells can be identified from at most 2-channels (one cyto-like and one nucleus-like)": {
26
+ "Can the cells be identified using at most two channels (one for cytoplasm and one for nucleus)?": {
27
27
  "yes": {
28
- "cyto-like channel is brightfield": {
29
- "yes": "cellpose livecell",
30
- "no" : "cellpose cyto3"
28
+ "Is the cytoplasm channel a brightfield image?": {
29
+ "yes": "to use the cellpose livecell generalist model",
30
+ "no" : "to use cellpose cyto3 generalist model"
31
31
  }
32
32
  },
33
- "no": "train custom cellpose model"
33
+ "no": "to train a custom cellpose model using your multichannel data"
34
34
  }
35
35
  }
36
36
  }
@@ -1,26 +1,26 @@
1
1
  {
2
- "Cell masks can be extracted from a single channel": {
2
+ "Can cell masks be extracted from a single channel?": {
3
3
  "yes": {
4
- "Rare cell-cell contacts": {
4
+ "Are cell-cell contacts rare?": {
5
5
  "yes": {
6
- "Non-cell objects easily separable": {
6
+ "Are non-cell objects easy to separate from cells?": {
7
7
  "yes": {
8
- "Background heterogeneities": {
8
+ "Are there background heterogeneities in the images?": {
9
9
  "yes": {
10
- "Correction possible": {
11
- "yes": "Threshold pipeline",
12
- "no" : "DL"
10
+ "Can the heterogeneities be corrected?": {
11
+ "yes": "to use a threshold-based pipeline for mask extraction.",
12
+ "no": "to use a deep learning approach for better segmentation."
13
13
  }
14
14
  },
15
- "no": "Threshold pipeline"
15
+ "no": "to use a threshold-based pipeline for mask extraction."
16
16
  }
17
17
  },
18
- "no": "DL"
18
+ "no": "to use a deep learning approach for segmentation."
19
19
  }
20
20
  },
21
- "no": "DL"
21
+ "no": "to use a deep learning approach for segmentation."
22
22
  }
23
23
  },
24
- "no": "DL"
24
+ "no": "to use a deep learning approach for segmentation."
25
25
  }
26
26
  }
@@ -1,11 +1,11 @@
1
1
  {
2
- "Do you have more than one cell population of interest in the images?": {
2
+ "Do you have multiple cell populations of interest in your images?": {
3
3
  "yes": {
4
- "Do you have more than two cell populations of interest?": {
5
- "yes": "The study of interactions between more than two cell populations is not currently supported in celldetective. Either study the cell populations two-by-two or group several populations into one (e.g. all effector-like cells vs all target-like cells). You can use the classification tools of celldetective to decompose the responses per cell population at a later stage.",
6
- "no": "Identify the effector-like population (effects some changes on the other population) and the target-like population. If this characterization is ill-defined, choose a convention and stick to it. Beware, the available non-generalist Deep-learning models differ slightly for the two populations."
4
+ "Do you have more than two distinct cell populations of interest?": {
5
+ "yes": "Currently, celldetective does not support the analysis of interactions between more than two cell populations at once. We recommend studying interactions between two populations at a time, or grouping similar populations together (e.g., effector-like vs target-like cells). Later, you can use celldetective's classification tools to break down the results by individual cell population.",
6
+ "no": "Identify the effector-like population (which influences changes in the target population) and the target-like population. If you're unsure about the exact definitions, choose a convention and apply it consistently. Please note, the non-generalist deep learning models in celldetective may treat these populations slightly differently."
7
7
  }
8
8
  },
9
- "no": "Pass your cell population as either EFFECTORS or TARGETS and stick with the convention. Beware, the available non-generalist Deep-learning model differs slightly for the two populations."
9
+ "no": "Assign your cell population as either `effectors` or `targets` and maintain consistency with this choice throughout your analysis. Keep in mind that the non-generalist deep learning models in celldetective may treat these populations differently."
10
10
  }
11
11
  }
@@ -1,33 +1,33 @@
1
1
  {
2
- "Did you test different biological conditions?": {
2
+ "Have you tested multiple biological conditions in your experiment?": {
3
3
  "yes": {
4
- "Did you image several positions (tiles) per biological condition?": {
4
+ "Did you capture images at multiple positions (tiles) for each biological condition?": {
5
5
  "yes": {
6
- "Did you record a time-lapse?": {
7
- "yes": "Set N wells for the N biological conditions. Set M positions per well. Put the respective time-lapse stack in each generated position folder.",
8
- "no": "Set N wells for the N biological conditions. Set a single position per well. Assemble all the tiles into a single stack per condition."
6
+ "Did you record time-lapse sequences for your images?": {
7
+ "yes": "Set up N wells to represent your N biological conditions. Assign M positions for each well. Place each time-lapse sequence into its corresponding position folder.",
8
+ "no": "Set up N wells for your N biological conditions. Assign one position per well. Combine all tiles into a single image stack for each condition."
9
9
  }
10
10
  },
11
11
  "no": {
12
- "Did you record a time-lapse?": {
13
- "yes": "Set N wells for the N biological conditions. Set a single position per well. Put the respective time-lapse stack in each generated position folder.",
14
- "no": "There is not enough data to create a valid stack. Consider merging different experiments to generate several positions."
12
+ "Did you record time-lapse sequences for your images?": {
13
+ "yes": "Set up N wells to represent your N biological conditions. Assign one position per well. Place each time-lapse sequence into its corresponding position folder.",
14
+ "no": "There isn’t enough data to create a valid stack. Consider combining data from multiple experiments to create stacks with more positions."
15
15
  }
16
16
  }
17
17
  }
18
18
  },
19
19
  "no": {
20
- "Did you image several positions (tiles)?": {
20
+ "Did you capture images at multiple positions (tiles)?": {
21
21
  "yes": {
22
- "Did you record a time-lapse?": {
23
- "yes": "Set a single well. Set N positions. Put the respective time-lapse stack in each generated position folder.",
24
- "no": "Set a single well. Set a single position. Assemble all the tiles into a single stack and put it in this position folder."
22
+ "Did you record time-lapse sequences for your images?": {
23
+ "yes": "Set up a single well. Assign N positions. Place each time-lapse sequence into its corresponding position folder.",
24
+ "no": "Set up a single well. Assign one position. Combine all tiles into a single image stack and place it into the position folder."
25
25
  }
26
26
  },
27
27
  "no": {
28
- "Did you record a time-lapse?": {
29
- "yes": "Set a single well and a single position. Put the time-lapse stack in the generated position folder.",
30
- "no": "There is not enough data to create a valid stack. Consider merging different experiments to generate several positions."
28
+ "Did you record time-lapse sequences for your images?": {
29
+ "yes": "Set up a single well and a single position. Place the time-lapse sequence into the position folder.",
30
+ "no": "There isn’t enough data to create a valid stack. Consider combining data from multiple experiments to increase the number of positions."
31
31
  }
32
32
  }
33
33
  }
@@ -1,11 +1,11 @@
1
1
  {
2
- "Are morphological, tonal or textural features critical to follow the cells of interest?": {
2
+ "Are morphological, tonal, or textural features important for identifying and tracking the cells of interest?": {
3
3
  "yes": {
4
- "Are the values of these features constant or slowly changing?": {
5
- "yes": "You may pass features to bTrack. Both motion and feature information will be combined to perform tracking.",
6
- "no": "Abrupt transitions in the features may perturb bTrack's performance and truncate trajectories prematurely. Avoid passing features. "
4
+ "Do these features remain constant or change gradually over time?": {
5
+ "yes": "You can include these features in bTrack. The tracker will combine motion data and feature information for improved tracking accuracy.",
6
+ "no": "Abrupt changes in these features could disrupt bTrack's performance and prematurely end trajectories. It's better to rely only on motion data and avoid using features."
7
7
  }
8
8
  },
9
- "no": "No need to pass features. The tracking will rely exclusively on the cell positions. Skip this step."
9
+ "no": "There's no need to include features. The tracking will use only the positions of the cells. You can skip this step."
10
10
  }
11
11
  }
@@ -1,15 +1,15 @@
1
1
  {
2
- "Do you want to relate target and effector populations?": {
2
+ "Do you want to establish relationships between the target and effector cell populations?": {
3
3
  "yes": {
4
- "Do you have the complete cell shape for both populations of interest?": {
5
- "yes": "You may compute a mask-contact neighborhood. Set the reference and neighbor populations. Use the tolerance parameter to be more or less sensitive about the reference-cell / neighbor-cell contact.",
6
- "no": "You may use an isotropic distance threshold. Set the reference and neighbor populations. Use a radius r > (R_ref + 1/2 * R_neigh) with R_ref the average reference cell radius and R_neigh the average neighbor cell radius."
4
+ "Do you have the complete shape information for both populations?": {
5
+ "yes": "You can calculate a mask-contact neighborhood. Define the reference and neighbor populations. Adjust the tolerance parameter to control how sensitive the analysis is to contact between reference and neighbor cells.",
6
+ "no": "You can use an isotropic distance threshold. Define the reference and neighbor populations. Set a radius \( r > (R_{\text{ref}} + 0.5 \times R_{\text{neigh}}) \), where \( R_{\text{ref}} \) is the average radius of reference cells and \( R_{\text{neigh}} \) is the average radius of neighbor cells."
7
7
  }
8
8
  },
9
9
  "no": {
10
- "Do you have the complete cell shape for the population of interest?": {
11
- "yes": "You may compute a mask-contact neighborhood. Set the same reference and neighbor population. Use the tolerance parameter to be more or less sensitive about the cell-cell contact.",
12
- "no": "You may use an isotropic distance threshold. Set the same reference and neighbor population. Use a radius r > 1.5*R with R the average cell radius."
10
+ "Do you have the complete shape information for the population of interest?": {
11
+ "yes": "You can calculate a mask-contact neighborhood. Use the same population as both the reference and neighbor. Adjust the tolerance parameter to control sensitivity to cell-cell contact.",
12
+ "no": "You can use an isotropic distance threshold. Use the same population as both the reference and neighbor. Set a radius \( r > 1.5 \times R \), where \( R \) is the average cell radius."
13
13
  }
14
14
  }
15
15
  }
@@ -1,15 +1,15 @@
1
1
  {
2
- "Cells are either brighter or darker than background": {
2
+ "Are the cells brighter or darker than the background?": {
3
3
  "yes": {
4
- "Background is heterogeneous": {
5
- "yes": "prefilter" ,
6
- "no" : "threshold directly"
4
+ "Is the background heterogeneous (varying in intensity across the image)?": {
5
+ "yes": "Apply a prefilter to normalize the background before further processing.",
6
+ "no": "You can directly apply a threshold to segment the cells from the background."
7
7
  }
8
8
  },
9
9
  "no": {
10
- "Background is perfectly homogeneous (same value everywhere on the image)": {
11
- "yes": "subtract the background value (subtract_filter) and take the absolute value (abs_filter) for all the pixels of the image. You may add a slight Gaussian blur after these operations",
12
- "no": "use a Gaussian blur and apply a standard-deviation filter (or variance filter)"
10
+ "Is the background perfectly homogeneous (constant intensity across the entire image)?": {
11
+ "yes": "Subtract the background value (subtract_filter) and compute the absolute value (abs_filter) for all pixels. Optionally, apply a slight Gaussian blur for smoother results.",
12
+ "no": "Use a Gaussian blur followed by a standard-deviation or variance filter to enhance cell features."
13
13
  }
14
14
  }
15
15
  }
@@ -1,17 +1,17 @@
1
1
  {
2
- "Is the background spatially invariant (within a well)?": {
2
+ "Is the background consistent across positions (spatially invariant within a well)?": {
3
3
  "yes": {
4
- "Do your stacks represent timeseries?": {
4
+ "Are your image stacks time-series data?": {
5
5
  "yes": {
6
- "Does your background add to the signal (typical of fluorescence)?": {
7
- "yes": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have timeseries. Estimate the frame range over which you have the best estimate of the background (typically before cell arrival, when applicable). Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one frame to the next or one position to the next, activate the optimization option. Since your background is additive, you may subtract it from the images. ",
8
- "no": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have timeseries. Estimate the frame range over which you have the best estimate of the background (typically before cell arrival, when applicable). Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one frame to the next or one position to the next, activate the optimization option. Since your background is not additive, you may divide it from the images to express the intensities as relative to the background."
6
+ "Does the background add to the signal (common in fluorescence imaging)?": {
7
+ "yes": "Perform a model-free background correction. Specify the channel(s) of interest and indicate that you have time-series data. Estimate the frame range where the background is best defined (e.g., before cell arrival). Adjust the threshold carefully to exclude non-background objects. If background intensity fluctuates slightly across frames or positions, enable the optimization option. Since the background is additive, subtract it from the images.",
8
+ "no": "Perform a model-free background correction. Specify the channel(s) of interest and indicate that you have time-series data. Estimate the frame range where the background is best defined (e.g., before cell arrival). Adjust the threshold carefully to exclude non-background objects. If background intensity fluctuates slightly across frames or positions, enable the optimization option. Since the background is not additive, divide the images by the background to express intensities relative to it."
9
9
  }
10
10
  },
11
11
  "no": {
12
- "Does your background add to the signal (typical of fluorescence)?": {
13
- "yes": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have tiles. Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one position to the next, activate the optimization option. Since your background is additive, you may subtract it from the images. ",
14
- "no": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have tiles. Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one position to the next, activate the optimization option. Since your background is not additive, you may divide it from the images to express the intensities as relative to the background."
12
+ "Does the background add to the signal (common in fluorescence imaging)?": {
13
+ "yes": "Perform a model-free background correction. Specify the channel(s) of interest and indicate that you are working with tiles. Adjust the threshold carefully to exclude non-background objects. If background intensity fluctuates across positions, enable the optimization option. Since the background is additive, subtract it from the images.",
14
+ "no": "Perform a model-free background correction. Specify the channel(s) of interest and indicate that you are working with tiles. Adjust the threshold carefully to exclude non-background objects. If background intensity fluctuates across positions, enable the optimization option. Since the background is not additive, divide the images by the background to express intensities relative to it."
15
15
  }
16
16
  }
17
17
  }
@@ -19,30 +19,30 @@
19
19
  "no": {
20
20
  "Is the background brighter at the center than at the edges?": {
21
21
  "yes": {
22
- "Is the background correction critical to segment the cells? It is often the case when you use a traditional segmentation pipeline.": {
22
+ "Is background correction critical for segmenting cells (common in traditional segmentation pipelines)?": {
23
23
  "yes": {
24
- "Does your background add to the signal (typical of fluorescence)?": {
25
- "yes": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a paraboloid model and subtract.",
26
- "no": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a paraboloid model and divide to express the intensities as relative to the background."
24
+ "Does the background add to the signal (common in fluorescence imaging)?": {
25
+ "yes": "Perform a model-based correction using a paraboloid model. Specify the channel(s) of interest and adjust the threshold to exclude non-background objects. Subtract the background from the images.",
26
+ "no": "Perform a model-based correction using a paraboloid model. Specify the channel(s) of interest and adjust the threshold to exclude non-background objects. Divide the images by the background to express intensities relative to it."
27
27
  }
28
28
  },
29
- "no": "You can skip the image preprocessing step to save a considerable amount of storage. You will have the option to perform this preprocessing on-the-fly in the measurement module, which may be preferable. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
29
+ "no": "Skip preprocessing to save storage. Preprocessing can be performed on-the-fly in the measurement module. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
30
30
  }
31
31
  },
32
32
  "no": {
33
33
  "Is the background a constant value?": {
34
34
  "yes": {
35
- "Is the background correction critical to segment the cells? It is often the case when you use a traditional segmentation pipeline.": {
35
+ "Is background correction critical for segmenting cells (common in traditional segmentation pipelines)?": {
36
36
  "yes": {
37
- "Does your background add to the signal (typical of fluorescence)?": {
38
- "yes": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a plane model and subtract.",
39
- "no": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a plane model and divide to express the intensities as relative to the background."
37
+ "Does the background add to the signal (common in fluorescence imaging)?": {
38
+ "yes": "Perform a model-based correction using a plane model. Specify the channel(s) of interest and adjust the threshold to exclude non-background objects. Subtract the background from the images.",
39
+ "no": "Perform a model-based correction using a plane model. Specify the channel(s) of interest and adjust the threshold to exclude non-background objects. Divide the images by the background to express intensities relative to it."
40
40
  }
41
41
  },
42
- "no": "You can skip the image preprocessing step to save a considerable amount of storage. You will have the option to perform this preprocessing on-the-fly in the measurement module, which may be preferable. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
42
+ "no": "Skip preprocessing to save storage. Preprocessing can be performed on-the-fly in the measurement module. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
43
43
  }
44
44
  },
45
- "no": "For a complex background pattern it is preferable to not perform a correction at this stage. We recommend exploring the local cell correction available in the measurement module. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
45
+ "no": "For complex background patterns, avoid correction at this stage. Use the local cell correction available in the measurement module. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
46
46
  }
47
47
  }
48
48
  }
@@ -1,16 +1,16 @@
1
1
  {
2
- "Do you want to exploit the instantaneous classification to interpret cell tracks as a whole?": {
2
+ "Do you want to use instantaneous classification to interpret entire cell tracks?": {
3
3
  "yes": {
4
- "Do the cells exhibit irreversible transitions between the negative and positive state?": {
5
- "yes": "Tick the 'irreversible event' option. This will generate a class of either 0 (an observed transition), 1 (no observed transition) or 2 (a left-censored transition) for your cells. In case of doubt, transitioning cells (class 0) will be assigned to class 2. Lower the R² threshold if too many transitions are misidentified.",
4
+ "Do the cells undergo irreversible transitions between negative and positive states?": {
5
+ "yes": "Select the `irreversible event` option. This will classify cells as 0 (transition observed), 1 (no transition observed), or 2 (left-censored transition). In cases of uncertainty, transitioning cells (class 0) will be reassigned to class 2. If too many transitions are misidentified, consider lowering the R² threshold.",
6
6
  "no": {
7
- "Do the cells have a unique state from the beginning to the end of the movie?": {
8
- "yes": "Tick the 'unique state' option. This will generate a class of either 1 (always negative) or 2 (always positive) for your cells.",
9
- "no": "No model of propagation fit for your data is currently available in celldetective. Do not hesitate to formulate a <a href='https://github.com/remyeltorro/celldetective/issues/new?assignees=&labels=&projects=&template=%E2%AD%90-feature-request.md&title=%5BFEATURE%5D'>request</a>."
7
+ "Do cells maintain a single state (positive or negative) throughout the movie?": {
8
+ "yes": "Select the `unique state` option. Cells will be classified as 1 (always negative) or 2 (always positive).",
9
+ "no": "Currently, no propagation model in celldetective fits your data. Feel free to submit a <a href='https://github.com/remyeltorro/celldetective/issues/new?assignees=&labels=&projects=&template=%E2%AD%90-feature-request.md&title=%5BFEATURE%5D'>feature request</a>."
10
10
  }
11
11
  }
12
12
  }
13
13
  },
14
- "no": "Then you do not need to tick the 'Time correlated' option. Each cell at each time point is classified independently."
14
+ "no": "You do not need to enable the `Time correlated` option. Each cell will be classified independently at each time point."
15
15
  }
16
16
  }
@@ -6,7 +6,7 @@ from PyQt5.QtGui import QIcon, QDoubleValidator
6
6
  from celldetective.gui.gui_utils import center_window
7
7
  from celldetective.gui.generic_signal_plot import GenericSignalPlotWidget
8
8
  from superqt import QLabeledSlider, QColormapComboBox, QSearchableComboBox
9
- from celldetective.utils import get_software_location, _extract_labels_from_config
9
+ from celldetective.utils import get_software_location, _extract_labels_from_config, extract_cols_from_table_list
10
10
  from celldetective.io import load_experiment_tables
11
11
  from celldetective.signals import mean_signal
12
12
  import numpy as np
@@ -15,7 +15,6 @@ import matplotlib.pyplot as plt
15
15
  plt.rcParams['svg.fonttype'] = 'none'
16
16
  from glob import glob
17
17
  from natsort import natsorted
18
- import pandas as pd
19
18
  import math
20
19
  from celldetective.gui import Styles
21
20
  from matplotlib import colormaps
@@ -92,8 +91,14 @@ class ConfigSignalPlot(QWidget, Styles):
92
91
  """)
93
92
  main_layout.addWidget(panel_title, alignment=Qt.AlignCenter)
94
93
 
94
+ pops = []
95
+ for population in ['effectors','targets','pairs']:
96
+ tables = glob(self.exp_dir+os.sep.join(['W*','*','output','tables',f'trajectories_{population}.csv']))
97
+ if len(tables)>0:
98
+ pops.append(population)
99
+
95
100
  labels = [QLabel('population: '), QLabel('class: '), QLabel('time of\ninterest: '), QLabel('cmap: ')]
96
- self.cb_options = [['targets','effectors'],[], [], []]
101
+ self.cb_options = [pops,[], [], []]
97
102
  self.cbs = [QComboBox() for i in range(len(labels))]
98
103
  self.cbs[-1] = QColormapComboBox()
99
104
 
@@ -161,12 +166,11 @@ class ConfigSignalPlot(QWidget, Styles):
161
166
  def set_classes_and_times(self):
162
167
 
163
168
  # Look for all classes and times
164
- tables = natsorted(glob(self.exp_dir+os.sep.join(['W*','*','output','tables',f'trajectories_*.csv'])))
165
- self.all_columns = []
166
- for tab in tables:
167
- cols = pd.read_csv(tab, nrows=1).columns.tolist()
168
- self.all_columns.extend(cols)
169
- self.all_columns = np.unique(self.all_columns)
169
+ population = self.cbs[0].currentText()
170
+ tables = natsorted(glob(self.exp_dir+os.sep.join(['W*','*','output','tables',f'trajectories_{population}.csv'])))
171
+
172
+ self.all_columns = extract_cols_from_table_list(tables)
173
+
170
174
  class_idx = np.array([s.startswith('class_') for s in self.all_columns])
171
175
  time_idx = np.array([s.startswith('t_') for s in self.all_columns])
172
176