celldetective 1.2.2.post1__py3-none-any.whl → 1.2.2.post2__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,400 @@
+ from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QWidget, QVBoxLayout, QCheckBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox, QMenu, QAction
+ from celldetective.utils import get_software_location
+ import os
+ from PyQt5.QtCore import Qt, QUrl
+ from PyQt5.QtGui import QIcon, QDesktopServices, QIntValidator
+ from glob import glob
+ from superqt.fonticon import icon
+ from fonticon_mdi6 import MDI6
+ import gc
+ from celldetective.gui import Styles, ControlPanel, ConfigNewExperiment
+ from celldetective.gui.gui_utils import center_window
+ import subprocess
+ from celldetective.gui.about import AboutWidget
+ from celldetective.io import correct_annotation
+ import psutil
+ import json
+
+ class AppInitWindow(QMainWindow):
+
+     """
+     Initial window to set the experiment folder or create a new one.
+     """
+
+     def __init__(self, parent_window=None):
+         super().__init__()
+
+         self.parent_window = parent_window
+         self.Styles = Styles()
+         self.init_styles()
+         self.setWindowTitle("celldetective")
+
+         # Default to a single worker thread; adjustable in Options > Memory & Threads.
+         self.n_threads = min([1,psutil.cpu_count()])
+
+         # Probe for an NVIDIA GPU with nvidia-smi.
+         try:
+             subprocess.check_output('nvidia-smi')
+             print('NVIDIA GPU detected (activate or disable in Memory & Threads)...')
+             self.use_gpu = True
+         except Exception: # this command not being found can raise quite a few different errors depending on the configuration
+             print('No NVIDIA GPU detected...')
+             self.use_gpu = False
+
+         self.soft_path = get_software_location()
+         self.onlyInt = QIntValidator()
+         self.setWindowIcon(QIcon(os.sep.join([self.soft_path,'celldetective','icons','logo.png'])))
+         center_window(self)
+         self._createActions()
+         self._createMenuBar()
+
+         app = QApplication.instance()
+         self.screen = app.primaryScreen()
+         self.geometry = self.screen.availableGeometry()
+         self.screen_width, self.screen_height = self.geometry.getRect()[-2:]
+
+         central_widget = QWidget()
+         self.vertical_layout = QVBoxLayout(central_widget)
+         self.vertical_layout.setContentsMargins(15,15,15,15)
+         self.vertical_layout.addWidget(QLabel("Experiment folder:"))
+         self.create_locate_exp_hbox()
+         self.create_buttons_hbox()
+         self.setCentralWidget(central_widget)
+         self.reload_previous_gpu_threads()
+         self.show()
+
+     def closeEvent(self, event):
+         QApplication.closeAllWindows()
+         event.accept()
+         gc.collect()
+
+     def create_locate_exp_hbox(self):
+
+         self.locate_exp_layout = QHBoxLayout()
+         self.locate_exp_layout.setContentsMargins(0,5,0,0)
+         self.experiment_path_selection = QLineEdit()
+         self.experiment_path_selection.setAlignment(Qt.AlignLeft)
+         self.experiment_path_selection.setEnabled(True)
+         self.experiment_path_selection.setDragEnabled(True)
+         self.experiment_path_selection.setFixedWidth(430)
+         self.experiment_path_selection.textChanged[str].connect(self.check_path_and_enable_opening)
+         self.foldername = os.getcwd()
+         self.experiment_path_selection.setPlaceholderText('/path/to/experiment/folder/')
+         self.locate_exp_layout.addWidget(self.experiment_path_selection, 90)
+
+         self.browse_button = QPushButton("Browse...")
+         self.browse_button.clicked.connect(self.browse_experiment_folder)
+         self.browse_button.setStyleSheet(self.button_style_sheet)
+         self.browse_button.setIcon(icon(MDI6.folder, color="white"))
+         self.locate_exp_layout.addWidget(self.browse_button, 10)
+         self.vertical_layout.addLayout(self.locate_exp_layout)
+
+
+     def _createMenuBar(self):
+
+         menuBar = self.menuBar()
+         menuBar.clear()
+         # Creating menus using a QMenu object
+
+         fileMenu = QMenu("File", self)
+         fileMenu.clear()
+         fileMenu.addAction(self.newExpAction)
+         fileMenu.addAction(self.openAction)
+
+         fileMenu.addMenu(self.OpenRecentAction)
+         self.OpenRecentAction.clear()
+         if len(self.recentFileActs)>0:
+             for i in range(len(self.recentFileActs)):
+                 self.OpenRecentAction.addAction(self.recentFileActs[i])
+
+         fileMenu.addAction(self.openModels)
+         fileMenu.addSeparator()
+         fileMenu.addAction(self.exitAction)
+         menuBar.addMenu(fileMenu)
+
+         OptionsMenu = QMenu("Options", self)
+         OptionsMenu.addAction(self.MemoryAndThreadsAction)
+         menuBar.addMenu(OptionsMenu)
+
+         PluginsMenu = QMenu("Plugins", self)
+         PluginsMenu.addAction(self.CorrectAnnotationAction)
+         menuBar.addMenu(PluginsMenu)
+
+         helpMenu = QMenu("Help", self)
+         helpMenu.clear()
+         helpMenu.addAction(self.DocumentationAction)
+         helpMenu.addAction(self.SoftwareAction)
+         helpMenu.addSeparator()
+         helpMenu.addAction(self.AboutAction)
+         menuBar.addMenu(helpMenu)
+
+         #editMenu = menuBar.addMenu("&Edit")
+         #helpMenu = menuBar.addMenu("&Help")
+
+     def _createActions(self):
+         # Creating action using the first constructor
+         #self.newAction = QAction(self)
+         #self.newAction.setText("&New")
+         # Creating actions using the second constructor
+         self.openAction = QAction('Open...', self)
+         self.openAction.setShortcut("Ctrl+O")
+         self.openAction.setShortcutVisibleInContextMenu(True)
+
+         self.MemoryAndThreadsAction = QAction('Memory & Threads...')
+
+         self.CorrectAnnotationAction = QAction('Correct a segmentation annotation...')
+
+         self.newExpAction = QAction('New', self)
+         self.newExpAction.setShortcut("Ctrl+N")
+         self.newExpAction.setShortcutVisibleInContextMenu(True)
+         self.exitAction = QAction('Exit', self)
+
+         self.openModels = QAction('Open Models Location')
+         self.openModels.setShortcut("Ctrl+L")
+         self.openModels.setShortcutVisibleInContextMenu(True)
+
+         self.OpenRecentAction = QMenu('Open Recent')
+         self.reload_previous_experiments()
+
+         self.DocumentationAction = QAction("Documentation", self)
+         self.DocumentationAction.setShortcut("Ctrl+D")
+         self.DocumentationAction.setShortcutVisibleInContextMenu(True)
+
+         self.SoftwareAction = QAction("Software", self) #1st arg icon(MDI6.information)
+         self.AboutAction = QAction("About celldetective", self)
+
+         #self.DocumentationAction.triggered.connect(self.load_previous_config)
+         self.openAction.triggered.connect(self.open_experiment)
+         self.newExpAction.triggered.connect(self.create_new_experiment)
+         self.exitAction.triggered.connect(self.close)
+         self.openModels.triggered.connect(self.open_models_folder)
+         self.AboutAction.triggered.connect(self.open_about_window)
+         self.MemoryAndThreadsAction.triggered.connect(self.set_memory_and_threads)
+         self.CorrectAnnotationAction.triggered.connect(self.correct_seg_annotation)
+
+         self.DocumentationAction.triggered.connect(self.open_documentation)
+
+     def reload_previous_gpu_threads(self):
+
+         self.recentFileActs = []
+         self.threads_config_path = os.sep.join([self.soft_path,'celldetective','threads.json'])
+         print('Reading previous Memory & Threads settings...')
+         if os.path.exists(self.threads_config_path):
+             with open(self.threads_config_path, 'r') as f:
+                 self.threads_config = json.load(f)
+             if 'use_gpu' in self.threads_config:
+                 self.use_gpu = bool(self.threads_config['use_gpu'])
+                 print(f'Use GPU: {self.use_gpu}...')
+             if 'n_threads' in self.threads_config:
+                 self.n_threads = int(self.threads_config['n_threads'])
+                 print(f'Number of threads: {self.n_threads}...')
+
+
+     def reload_previous_experiments(self):
+
+         recentExps = []
+         self.recentFileActs = []
+         if os.path.exists(os.sep.join([self.soft_path,'celldetective','recent.txt'])):
+             # Read (and close) the history of recently opened experiments.
+             with open(os.sep.join([self.soft_path,'celldetective','recent.txt']), 'r') as f:
+                 recentExps = f.readlines()
+             recentExps = [r.strip() for r in recentExps]
+             recentExps.reverse()
+             recentExps = list(dict.fromkeys(recentExps))
+             self.recentFileActs = [QAction(r,self) for r in recentExps]
+             for r in self.recentFileActs:
+                 r.triggered.connect(lambda checked, item=r: self.load_recent_exp(item.text()))
+
+     def correct_seg_annotation(self):
+
+         self.filename,_ = QFileDialog.getOpenFileName(self,"Open Image", "/home/", "TIF Files (*.tif)")
+         if self.filename!='':
+             print('Opening ',self.filename,' in napari...')
+             correct_annotation(self.filename)
+         else:
+             return None
+
+     def set_memory_and_threads(self):
+
+         print('setting memory and threads')
+
+         self.ThreadsWidget = QWidget()
+         self.ThreadsWidget.setWindowTitle("Threads")
+         layout = QVBoxLayout()
+         self.ThreadsWidget.setLayout(layout)
+
+         self.threads_le = QLineEdit(str(self.n_threads))
+         self.threads_le.setValidator(self.onlyInt)
+
+         hbox = QHBoxLayout()
+         hbox.addWidget(QLabel('Parallel threads: '), 33)
+         hbox.addWidget(self.threads_le, 66)
+         layout.addLayout(hbox)
+
+         self.use_gpu_checkbox = QCheckBox()
+         hbox2 = QHBoxLayout()
+         hbox2.addWidget(QLabel('Use GPU: '), 33)
+         hbox2.addWidget(self.use_gpu_checkbox, 66)
+         layout.addLayout(hbox2)
+         if self.use_gpu:
+             self.use_gpu_checkbox.setChecked(True)
+
+         self.validateThreadBtn = QPushButton('Submit')
+         self.validateThreadBtn.setStyleSheet(self.button_style_sheet)
+         self.validateThreadBtn.clicked.connect(self.set_threads)
+         layout.addWidget(self.validateThreadBtn)
+         center_window(self.ThreadsWidget)
+         self.ThreadsWidget.show()
+
+     def set_threads(self):
+         self.n_threads = int(self.threads_le.text())
+         self.use_gpu = bool(self.use_gpu_checkbox.isChecked())
+         dico = {"use_gpu": self.use_gpu, "n_threads": self.n_threads}
+         with open(self.threads_config_path, 'w') as f:
+             json.dump(dico, f, indent=4)
+         self.ThreadsWidget.close()
+
+
+     def open_experiment(self):
+         print('ok')
+         self.browse_experiment_folder()
+         if self.experiment_path_selection.text()!='':
+             self.open_directory()
+
+     def load_recent_exp(self, path):
+
+         self.experiment_path_selection.setText(path)
+         print(f'Attempt to load experiment folder: {path}...')
+         self.open_directory()
+
+     def open_about_window(self):
+         self.about_wdw = AboutWidget()
+         self.about_wdw.show()
+
+     def open_documentation(self):
+         doc_url = QUrl('https://celldetective.readthedocs.io/')
+         QDesktopServices.openUrl(doc_url)
+
+     def open_models_folder(self):
+         path = os.sep.join([self.soft_path,'celldetective','models',os.sep])
+         try:
+             # Windows file explorer
+             subprocess.Popen(f'explorer {os.path.realpath(path)}')
+         except Exception:
+             try:
+                 # Linux fallback
+                 os.system('xdg-open "%s"' % path)
+             except Exception:
+                 return None
+
+         #os.system(f'start {os.path.realpath(path)}')
+
+     def create_buttons_hbox(self):
+
+         self.buttons_layout = QHBoxLayout()
+         self.buttons_layout.setContentsMargins(30,15,30,5)
+         self.new_exp_button = QPushButton("New")
+         self.new_exp_button.clicked.connect(self.create_new_experiment)
+         self.new_exp_button.setStyleSheet(self.button_style_sheet_2)
+         self.buttons_layout.addWidget(self.new_exp_button, 50)
+
+         self.validate_button = QPushButton("Open")
+         self.validate_button.clicked.connect(self.open_directory)
+         self.validate_button.setStyleSheet(self.button_style_sheet)
+         self.validate_button.setEnabled(False)
+         self.validate_button.setShortcut("Return")
+         self.buttons_layout.addWidget(self.validate_button, 50)
+         self.vertical_layout.addLayout(self.buttons_layout)
+
+     def check_path_and_enable_opening(self):
+
+         """
+         Enable the 'Open' button if the text is a valid experiment path (i.e. it contains a config.ini).
+         """
+
+         text = self.experiment_path_selection.text()
+         if os.path.exists(text) and os.path.exists(os.sep.join([text,"config.ini"])):
+             self.validate_button.setEnabled(True)
+         else:
+             self.validate_button.setEnabled(False)
+
+     def init_styles(self):
+
+         """
+         Initialize styles.
+         """
+
+         self.qtab_style = self.Styles.qtab_style
+         self.button_style_sheet = self.Styles.button_style_sheet
+         self.button_style_sheet_2 = self.Styles.button_style_sheet_2
+         self.button_style_sheet_2_not_done = self.Styles.button_style_sheet_2_not_done
+         self.button_style_sheet_3 = self.Styles.button_style_sheet_3
+         self.button_select_all = self.Styles.button_select_all
+
+     def set_experiment_path(self, path):
+         self.experiment_path_selection.setText(path)
+
+     def create_new_experiment(self):
+
+         print("Configuring new experiment...")
+         self.new_exp_window = ConfigNewExperiment(self)
+         self.new_exp_window.show()
+
+     def open_directory(self):
+
+         self.exp_dir = self.experiment_path_selection.text().replace('/', os.sep)
+         print(f"Setting current directory to {self.exp_dir}...")
+
+         # Wells must follow the W*/ nomenclature inside the experiment folder.
+         wells = glob(os.sep.join([self.exp_dir,"W*"]))
+         self.number_of_wells = len(wells)
+         if self.number_of_wells==0:
+             msgBox = QMessageBox()
+             msgBox.setIcon(QMessageBox.Critical)
+             msgBox.setText("No well was found in the experiment folder.\nPlease respect the W*/ nomenclature...")
+             msgBox.setWindowTitle("Error")
+             msgBox.setStandardButtons(QMessageBox.Ok)
+             returnValue = msgBox.exec()
+             if returnValue == QMessageBox.Ok:
+                 return None
+         else:
+             if self.number_of_wells==1:
+                 print(f"Found {self.number_of_wells} well...")
+             elif self.number_of_wells>1:
+                 print(f"Found {self.number_of_wells} wells...")
+             number_pos = []
+             for w in wells:
+                 position_folders = glob(os.sep.join([w,f"{w.split(os.sep)[-1][1]}*", os.sep]))
+                 number_pos.append(len(position_folders))
+             print(f"Number of positions per well: {number_pos}")
+
+         with open(os.sep.join([self.soft_path,'celldetective','recent.txt']), 'a+') as f:
+             f.write(self.exp_dir+'\n')
+
+         self.control_panel = ControlPanel(self, self.exp_dir)
+         self.control_panel.show()
+
+         self.reload_previous_experiments()
+         self._createMenuBar()
+
+     def browse_experiment_folder(self):
+
+         """
+         Locate an experiment folder. If no configuration file is in the experiment, display a warning.
+         """
+
+         self.foldername = str(QFileDialog.getExistingDirectory(self, 'Select directory'))
+         if self.foldername!='':
+             self.experiment_path_selection.setText(self.foldername)
+         else:
+             return None
+         if not os.path.exists(self.foldername+"/config.ini"):
+             msgBox = QMessageBox()
+             msgBox.setIcon(QMessageBox.Warning)
+             msgBox.setText("No configuration can be found in the selected folder...")
+             msgBox.setWindowTitle("Warning")
+             msgBox.setStandardButtons(QMessageBox.Ok)
+             returnValue = msgBox.exec()
+             if returnValue == QMessageBox.Ok:
+                 self.experiment_path_selection.setText('')
+                 return None
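
The Memory & Threads settings handled by set_memory_and_threads(), set_threads() and reload_previous_gpu_threads() above are persisted as a small JSON file (threads.json) next to the installed package. A minimal sketch of that round trip, using an illustrative path and made-up values rather than the real install location:

import json, os

threads_config_path = os.path.join('celldetective', 'threads.json')   # illustrative path
os.makedirs(os.path.dirname(threads_config_path), exist_ok=True)

settings = {"use_gpu": False, "n_threads": 2}   # same keys as written by set_threads()
with open(threads_config_path, 'w') as f:
    json.dump(settings, f, indent=4)

with open(threads_config_path, 'r') as f:       # same read path as reload_previous_gpu_threads()
    config = json.load(f)

use_gpu = bool(config.get('use_gpu', False))
n_threads = int(config.get('n_threads', 1))
print(f'Use GPU: {use_gpu}... Number of threads: {n_threads}...')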
@@ -0,0 +1,41 @@
+ {
+   "Blob-like cell": {
+     "yes": {
+       "Fluorescence image": {
+         "yes": {
+           "Mixture of population": {
+             "yes": "train custom StarDist model",
+             "no": {
+               "Cells can be identified from a single channel": {
+                 "yes": "Use StarDist versatile fluorescence",
+                 "no": "train custom StarDist model"
+               }
+             }
+           }
+         },
+         "no": "train custom StarDist model"
+       }
+     },
+     "no": {
+       "Mixture of population": {
+         "yes": "train custom cellpose model",
+         "no": {
+           "Heterogeneity in cell sizes": {
+             "yes": "train custom cellpose model",
+             "no": {
+               "Cells can be identified from at most 2-channels (one cyto-like and one nucleus-like)": {
+                 "yes": {
+                   "cyto-like channel is brightfield": {
+                     "yes": "cellpose livecell",
+                     "no": "cellpose cyto3"
+                   }
+                 },
+                 "no": "train custom cellpose model"
+               }
+             }
+           }
+         }
+       }
+     }
+   }
+ }
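
This file, like the other JSON files added in this release, encodes a yes/no decision tree: each node maps a question to its 'yes' and 'no' branches, and leaves are recommendation strings. A minimal sketch of how such a tree can be walked (the helper below and the file name segmentation_tree.json are illustrative, not celldetective's own code):

import json

def walk(node, answers):
    # Follow 'yes'/'no' answers through the nested question tree until a string leaf is reached.
    while isinstance(node, dict):
        question, branches = next(iter(node.items()))
        node = branches[answers[question]]
    return node

with open('segmentation_tree.json', 'r') as f:   # e.g. the tree above, saved under a hypothetical name
    tree = json.load(f)

answers = {
    "Blob-like cell": "yes",
    "Fluorescence image": "yes",
    "Mixture of population": "no",
    "Cells can be identified from a single channel": "yes",
}
print(walk(tree, answers))   # -> "Use StarDist versatile fluorescence"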
@@ -0,0 +1,26 @@
+ {
+   "Cell masks can be extracted from a single channel": {
+     "yes": {
+       "Rare cell-cell contacts": {
+         "yes": {
+           "Non-cell objects easily separable": {
+             "yes": {
+               "Background heterogeneities": {
+                 "yes": {
+                   "Correction possible": {
+                     "yes": "Threshold pipeline",
+                     "no": "DL"
+                   }
+                 },
+                 "no": "Threshold pipeline"
+               }
+             },
+             "no": "DL"
+           }
+         },
+         "no": "DL"
+       }
+     },
+     "no": "DL"
+   }
+ }
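
For the "Threshold pipeline" leaves above, the traditional route boils down to prefilter, threshold and label. A minimal, generic sketch with scipy and scikit-image (the filter size and the Otsu threshold are arbitrary choices, not celldetective defaults):

import numpy as np
from scipy.ndimage import gaussian_filter, label
from skimage.filters import threshold_otsu

# Synthetic fluorescence-like frame: dim background with two bright blobs.
rng = np.random.default_rng(0)
img = rng.normal(100, 5, (256, 256))
img[60:80, 60:80] += 200
img[150:170, 180:200] += 200

smoothed = gaussian_filter(img, sigma=2)        # prefilter to suppress noise
mask = smoothed > threshold_otsu(smoothed)      # global threshold
labels, n_objects = label(mask)                 # connected components = candidate cell masks
print(f'{n_objects} objects detected')          # expect 2 for this synthetic frame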
@@ -0,0 +1,11 @@
+ {
+   "Do you have more than one cell population of interest in the images?": {
+     "yes": {
+       "Do you have more than two cell populations of interest?": {
+         "yes": "The study of interactions between more than two cell populations is not currently supported in celldetective. Either study the cell populations two-by-two or group several populations into one (e.g. all effector-like cells vs all target-like cells). You can use the classification tools of celldetective to decompose the responses per cell population at a later stage.",
+         "no": "Identify the effector-like population (the one that effects changes on the other population) and the target-like population. If this characterization is ill-defined, choose a convention and stick to it. Beware, the available non-generalist Deep-learning models differ slightly for the two populations."
+       }
+     },
+     "no": "Pass your cell population as either EFFECTORS or TARGETS and stick with the convention. Beware, the available non-generalist Deep-learning model differs slightly for the two populations."
+   }
+ }
@@ -0,0 +1,36 @@
+ {
+   "Did you test different biological conditions?": {
+     "yes": {
+       "Did you image several positions (tiles) per biological condition?": {
+         "yes": {
+           "Did you record a time-lapse?": {
+             "yes": "Set N wells for the N biological conditions. Set M positions per well. Put the respective time-lapse stack in each generated position folder.",
+             "no": "Set N wells for the N biological conditions. Set a single position per well. Assemble all the tiles into a single stack per condition."
+           }
+         },
+         "no": {
+           "Did you record a time-lapse?": {
+             "yes": "Set N wells for the N biological conditions. Set a single position per well. Put the respective time-lapse stack in each generated position folder.",
+             "no": "There is not enough data to create a valid stack. Consider merging different experiments to generate several positions."
+           }
+         }
+       }
+     },
+     "no": {
+       "Did you image several positions (tiles)?": {
+         "yes": {
+           "Did you record a time-lapse?": {
+             "yes": "Set a single well. Set N positions. Put the respective time-lapse stack in each generated position folder.",
+             "no": "Set a single well. Set a single position. Assemble all the tiles into a single stack and put it in this position folder."
+           }
+         },
+         "no": {
+           "Did you record a time-lapse?": {
+             "yes": "Set a single well and a single position. Put the time-lapse stack in the generated position folder.",
+             "no": "There is not enough data to create a valid stack. Consider merging different experiments to generate several positions."
+           }
+         }
+       }
+     }
+   }
+ }
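
Every answer above maps to the same on-disk layout that AppInitWindow.open_directory() expects: an experiment folder with a config.ini at its root, one W<i> folder per well (condition), and numbered position folders inside each well. A minimal sketch that creates such a skeleton (names and counts are illustrative):

import os

experiment = "MyExperiment"      # hypothetical experiment folder
n_wells, n_positions = 2, 3      # e.g. 2 biological conditions, 3 positions (tiles) each

os.makedirs(experiment, exist_ok=True)
open(os.path.join(experiment, "config.ini"), 'a').close()   # the GUI only opens folders containing a config.ini

for w in range(1, n_wells + 1):
    for p in range(n_positions):
        # Wells are W1, W2, ...; position folder names start with the well number (100, 101, ..., 200, ...).
        pos = os.path.join(experiment, f"W{w}", f"{w}{p:02d}")
        os.makedirs(pos, exist_ok=True)
        # Put the corresponding stack (time-lapse or assembled tiles) inside each position folder.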
@@ -0,0 +1,11 @@
+ {
+   "Are morphological, tonal or textural features critical to follow the cells of interest?": {
+     "yes": {
+       "Are the values of these features constant or slowly changing?": {
+         "yes": "You may pass features to bTrack. Both motion and feature information will be combined to perform tracking.",
+         "no": "Abrupt transitions in the features may perturb bTrack's performance and truncate trajectories prematurely. Avoid passing features."
+       }
+     },
+     "no": "No need to pass features. The tracking will rely exclusively on the cell positions. Skip this step."
+   }
+ }
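
One quick way to judge the "constant or slowly changing" criterion before passing features to bTrack is to measure how much each candidate feature fluctuates along provisional tracks. A minimal sketch with pandas (column names and values are hypothetical, not celldetective's table schema):

import pandas as pd

# Hypothetical per-frame measurements with a provisional track id.
df = pd.DataFrame({
    "TRACK_ID":  [0, 0, 0, 1, 1, 1],
    "FRAME":     [0, 1, 2, 0, 1, 2],
    "area":      [210, 214, 209, 180, 320, 175],   # abrupt jump in track 1
    "intensity": [55, 56, 54, 60, 61, 59],
})

# Coefficient of variation per track: large values flag features that transition abruptly.
cv = df.groupby("TRACK_ID")[["area", "intensity"]].agg(lambda s: s.std() / s.mean())
print(cv.round(3))   # a feature with a high CV (here 'area' in track 1) is risky to pass to bTrack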
@@ -0,0 +1,16 @@
+ {
+   "Do you want to relate target and effector populations?": {
+     "yes": {
+       "Do you have the complete cell shape for both populations of interest?": {
+         "yes": "You may compute a mask-contact neighborhood. Set the reference and neighbor populations. Use the tolerance parameter to be more or less sensitive about the reference-cell / neighbor-cell contact.",
+         "no": "You may use an isotropic distance threshold. Set the reference and neighbor populations. Use a radius r > (R_ref + 1/2 * R_neigh) with R_ref the average reference cell radius and R_neigh the average neighbor cell radius."
+       }
+     },
+     "no": {
+       "Do you have the complete cell shape for the population of interest?": {
+         "yes": "You may compute a mask-contact neighborhood. Set the same reference and neighbor population. Use the tolerance parameter to be more or less sensitive about the cell-cell contact.",
+         "no": "You may use an isotropic distance threshold. Set the same reference and neighbor population. Use a radius r > 1.5*R with R the average cell radius."
+       }
+     }
+   }
+ }
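
The suggested radii above follow directly from the average cell radii. A tiny worked example (the radii are made-up values in pixels):

# Two-population case: r > R_ref + 0.5 * R_neigh
R_ref, R_neigh = 12.0, 8.0               # hypothetical average radii, in pixels
r_two_pop = R_ref + 0.5 * R_neigh        # 16.0 -> pick e.g. r = 18 px

# Single-population case: r > 1.5 * R
R = 10.0
r_single_pop = 1.5 * R                   # 15.0 -> pick e.g. r = 16 px

print(r_two_pop, r_single_pop)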
@@ -0,0 +1,16 @@
+ {
+   "Cells are either brighter or darker than background": {
+     "yes": {
+       "Background is heterogeneous": {
+         "yes": "prefilter",
+         "no": "threshold directly"
+       }
+     },
+     "no": {
+       "Background is perfectly homogeneous (same value everywhere on the image)": {
+         "yes": "subtract the background value (subtract_filter) and take the absolute value (abs_filter) for all the pixels of the image. You may add a slight Gaussian blur after these operations",
+         "no": "use a Gaussian blur and apply a standard-deviation filter (or variance filter)"
+       }
+     }
+   }
+ }
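
The last leaf above (cells neither uniformly brighter nor darker than the background) relies on a texture transform: after a Gaussian blur, a local standard-deviation filter turns textured cell regions into bright areas that can then be thresholded. A minimal sketch with scipy (kernel sizes and the threshold rule are arbitrary choices, not celldetective defaults):

import numpy as np
from scipy.ndimage import gaussian_filter, generic_filter

rng = np.random.default_rng(1)
img = rng.normal(100, 2, (128, 128))
img[40:80, 40:80] += rng.normal(0, 25, (40, 40))    # textured "cell" region with the same mean as the background

blurred = gaussian_filter(img, sigma=1)
std_map = generic_filter(blurred, np.std, size=5)   # local standard deviation
mask = std_map > std_map.mean() + 2 * std_map.std() # crude threshold on the texture map
print(mask[50:70, 50:70].mean() > mask[:20, :20].mean())   # the textured region stands out -> True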
@@ -0,0 +1,51 @@
+ {
+   "Is the background spatially invariant (within a well)?": {
+     "yes": {
+       "Do your stacks represent timeseries?": {
+         "yes": {
+           "Does your background add to the signal (typical of fluorescence)?": {
+             "yes": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have timeseries. Estimate the frame range over which you have the best estimate of the background (typically before cell arrival, when applicable). Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one frame to the next or one position to the next, activate the optimization option. Since your background is additive, you may subtract it from the images.",
+             "no": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have timeseries. Estimate the frame range over which you have the best estimate of the background (typically before cell arrival, when applicable). Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one frame to the next or one position to the next, activate the optimization option. Since your background is not additive, you may divide the images by it to express the intensities relative to the background."
+           }
+         },
+         "no": {
+           "Does your background add to the signal (typical of fluorescence)?": {
+             "yes": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have tiles. Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one position to the next, activate the optimization option. Since your background is additive, you may subtract it from the images.",
+             "no": "You may perform a model-free background correction. Set the channel(s) of interest and specify that you have tiles. Carefully tune the threshold to exclude all non-background objects. If the background intensity values fluctuate slightly from one position to the next, activate the optimization option. Since your background is not additive, you may divide the images by it to express the intensities relative to the background."
+           }
+         }
+       }
+     },
+     "no": {
+       "Is the background brighter at the center than at the edges?": {
+         "yes": {
+           "Is the background correction critical to segment the cells? This is often the case when you use a traditional segmentation pipeline.": {
+             "yes": {
+               "Does your background add to the signal (typical of fluorescence)?": {
+                 "yes": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a paraboloid model and subtract.",
+                 "no": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a paraboloid model and divide to express the intensities relative to the background."
+               }
+             },
+             "no": "You can skip the image preprocessing step to save a considerable amount of storage. You will have the option to perform this preprocessing on-the-fly in the measurement module, which may be preferable. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
+           }
+         },
+         "no": {
+           "Is the background a constant value?": {
+             "yes": {
+               "Is the background correction critical to segment the cells? This is often the case when you use a traditional segmentation pipeline.": {
+                 "yes": {
+                   "Does your background add to the signal (typical of fluorescence)?": {
+                     "yes": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a plane model and subtract.",
+                     "no": "You may perform a model-based (fit) correction. Set the channel(s) of interest and carefully tune the threshold to exclude all non-background objects. Choose a plane model and divide to express the intensities relative to the background."
+                   }
+                 },
+                 "no": "You can skip the image preprocessing step to save a considerable amount of storage. You will have the option to perform this preprocessing on-the-fly in the measurement module, which may be preferable. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
+               }
+             },
+             "no": "For a complex background pattern it is preferable not to perform a correction at this stage. We recommend exploring the local cell correction available in the measurement module. See <a href='https://celldetective.readthedocs.io/en/latest/measure.html#background-correction'>the documentation</a>."
+           }
+         }
+       }
+     }
+   }
+ }
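
For the model-based (fit) branches above, the principle is to fit a simple surface (plane or paraboloid) to the pixels classified as background, then subtract it from, or divide it into, the image. A minimal sketch of a plane fit with numpy least squares (the percentile rule used to keep background pixels is an arbitrary illustration):

import numpy as np

rng = np.random.default_rng(2)
h, w = 128, 128
yy, xx = np.mgrid[0:h, 0:w]

true_bg = 100 + 0.2 * xx + 0.1 * yy                  # tilted-plane background
img = true_bg + rng.normal(0, 2, (h, w))
img[50:70, 50:70] += 300                             # a bright cell to exclude from the fit

bg_pixels = img < np.percentile(img, 90)             # crude exclusion of non-background objects
A = np.c_[xx[bg_pixels], yy[bg_pixels], np.ones(bg_pixels.sum())]
coeffs, *_ = np.linalg.lstsq(A, img[bg_pixels], rcond=None)
fitted = coeffs[0] * xx + coeffs[1] * yy + coeffs[2]

corrected_sub = img - fitted                         # additive background (fluorescence): subtract
corrected_div = img / fitted                         # non-additive background: divide
print(np.round(coeffs, 2))                           # roughly [0.2, 0.1, 100]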
@@ -0,0 +1,16 @@
+ {
+   "Do you want to exploit the instantaneous classification to interpret cell tracks as a whole?": {
+     "yes": {
+       "Do the cells exhibit irreversible transitions between the negative and positive state?": {
+         "yes": "Tick the 'irreversible event' option. This will generate a class of either 0 (an observed transition), 1 (no observed transition) or 2 (a left-censored transition) for your cells. In case of doubt, transitioning cells (class 0) will be assigned to class 2. Lower the R² threshold if too many transitions are misidentified.",
+         "no": {
+           "Do the cells have a unique state from the beginning to the end of the movie?": {
+             "yes": "Tick the 'unique state' option. This will generate a class of either 1 (always negative) or 2 (always positive) for your cells.",
+             "no": "No propagation model fitting your data is currently available in celldetective. Do not hesitate to formulate a <a href='https://github.com/remyeltorro/celldetective/issues/new?assignees=&labels=&projects=&template=%E2%AD%90-feature-request.md&title=%5BFEATURE%5D'>request</a>."
+           }
+         }
+       }
+     },
+     "no": "You do not need to tick the 'Time correlated' option. Each cell at each time point is classified independently."
+   }
+ }
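
To make the three classes produced by the 'irreversible event' option concrete, here is a toy illustration (not celldetective's detection code) of how a per-frame 0/1 status trace maps to class 0, 1 or 2:

def event_class(status):
    # status: per-frame 0/1 values for one track (1 = positive state).
    # Returns 0 if a transition is observed within the movie,
    #         1 if the cell never turns positive,
    #         2 if it is already positive at the first frame (left-censored).
    if status[0] == 1:
        return 2
    if 1 in status:
        return 0
    return 1

print(event_class([0, 0, 0, 1, 1]))   # 0: transition observed
print(event_class([0, 0, 0, 0, 0]))   # 1: no transition observed
print(event_class([1, 1, 1, 1, 1]))   # 2: left-censored (already positive)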