cellfinder 1.4.1__py3-none-any.whl → 1.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. cellfinder/cli_migration_warning.py +3 -1
  2. cellfinder/core/classify/classify.py +51 -5
  3. cellfinder/core/classify/tools.py +13 -3
  4. cellfinder/core/detect/detect.py +94 -59
  5. cellfinder/core/detect/filters/plane/plane_filter.py +107 -10
  6. cellfinder/core/detect/filters/setup_filters.py +51 -12
  7. cellfinder/core/detect/filters/volume/ball_filter.py +5 -5
  8. cellfinder/core/detect/filters/volume/structure_detection.py +5 -0
  9. cellfinder/core/detect/filters/volume/structure_splitting.py +3 -2
  10. cellfinder/core/detect/filters/volume/volume_filter.py +1 -1
  11. cellfinder/core/download/download.py +2 -1
  12. cellfinder/core/main.py +162 -30
  13. cellfinder/core/tools/threading.py +4 -3
  14. cellfinder/core/tools/tools.py +1 -1
  15. cellfinder/core/train/{train_yml.py → train_yaml.py} +6 -15
  16. cellfinder/napari/curation.py +72 -21
  17. cellfinder/napari/detect/detect.py +87 -28
  18. cellfinder/napari/detect/detect_containers.py +41 -9
  19. cellfinder/napari/detect/thread_worker.py +26 -16
  20. cellfinder/napari/input_container.py +14 -4
  21. cellfinder/napari/train/train.py +5 -9
  22. cellfinder/napari/train/train_containers.py +2 -4
  23. cellfinder/napari/utils.py +6 -1
  24. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info}/METADATA +16 -12
  25. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info}/RECORD +29 -29
  26. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info}/WHEEL +1 -1
  27. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info}/entry_points.txt +1 -1
  28. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info/licenses}/LICENSE +0 -0
  29. {cellfinder-1.4.1.dist-info → cellfinder-1.9.0.dist-info}/top_level.txt +0 -0
cellfinder/core/detect/filters/volume/ball_filter.py CHANGED
@@ -78,11 +78,11 @@ class BallFilter:
     ----------
     plane_height, plane_width : int
         Height/width of the planes.
-    ball_xy_size : int
-        Diameter of the spherical kernel in the x/y dimensions.
-    ball_z_size : int
-        Diameter of the spherical kernel in the z dimension.
-        Equal to the number of planes stacked to filter
+    ball_xy_size : float
+        Diameter of the spherical kernel (in microns) in the x/y dimensions.
+    ball_z_size : float
+        Diameter of the spherical kernel in the z dimension in microns.
+        Determines the number of planes stacked to filter
         the central plane of the stack.
     overlap_fraction : float
         The fraction of pixels within the spherical kernel that
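With `ball_xy_size` and `ball_z_size` now given in microns rather than voxels, the number of planes the 3d filter stacks follows from the z voxel size. A minimal sketch of that conversion, assuming ceiling rounding (`planes_for_ball_z_size` is a hypothetical helper for illustration, not cellfinder's API):

    from math import ceil

    def planes_for_ball_z_size(ball_z_size_um: float, z_voxel_size_um: float) -> int:
        # e.g. a 15 um kernel over planes spaced 5 um apart spans 3 planes
        return max(1, ceil(ball_z_size_um / z_voxel_size_um))

    assert planes_for_ball_z_size(15, 5) == 3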
cellfinder/core/detect/filters/volume/structure_detection.py CHANGED
@@ -222,6 +222,11 @@ class CellDetector:
                 neighbour_ids[2] = previous_plane[y, x]
 
             if is_new_structure(neighbour_ids):
+                if self.next_structure_id > self.soma_centre_value:
+                    raise ValueError(
+                        "label overflow: number of connected "
+                        "components exceeds label capacity"
+                    )
                 neighbour_ids[0] = self.next_structure_id
                 self.next_structure_id += 1
             struct_id = self.add(x, y, self.z, neighbour_ids)
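The new check guards against structure labels running past the reserved soma-centre marker value, which would silently corrupt the labeling. A self-contained toy version of the same guard (the sentinel value here is a stand-in, not cellfinder's actual constant):

    import numpy as np

    SOMA_CENTRE_VALUE = np.iinfo(np.uint16).max  # stand-in sentinel label
    next_structure_id = 1

    def allocate_label() -> int:
        # mirrors the added check: ids past the sentinel overflow the label space
        global next_structure_id
        if next_structure_id > SOMA_CENTRE_VALUE:
            raise ValueError(
                "label overflow: number of connected "
                "components exceeds label capacity"
            )
        label = next_structure_id
        next_structure_id += 1
        return label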
cellfinder/core/detect/filters/volume/structure_splitting.py CHANGED
@@ -1,3 +1,4 @@
+from copy import copy
 from typing import List, Tuple, Type
 
 import numpy as np
@@ -65,8 +66,7 @@ def coords_to_volume(
     relative_zs = np.array((zs - z_min + ball_radius), dtype=np.int64)
 
     # set each point as the center with a value of threshold
-    for rel_x, rel_y, rel_z in zip(relative_xs, relative_ys, relative_zs):
-        volume[rel_x, rel_y, rel_z] = threshold_value
+    volume[relative_xs, relative_ys, relative_zs] = threshold_value
 
     volume = volume.swapaxes(0, 2)
     return torch.from_numpy(volume)
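The removed loop and the new fancy-indexed assignment write exactly the same voxels; NumPy pairs the three index arrays element-wise. A quick self-contained equivalence check:

    import numpy as np

    volume_a = np.zeros((4, 4, 4))
    volume_b = np.zeros((4, 4, 4))
    xs, ys, zs = np.array([0, 1, 2]), np.array([1, 2, 3]), np.array([2, 3, 0])
    threshold_value = 5.0

    for x, y, z in zip(xs, ys, zs):         # old, per-point loop
        volume_a[x, y, z] = threshold_value

    volume_b[xs, ys, zs] = threshold_value  # new, vectorized write
    assert np.array_equal(volume_a, volume_b)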
@@ -224,6 +224,7 @@ def split_cells(
         where M is the number of individual cells and each centre is
         represented by its x, y, and z coordinates.
     """
+    settings = copy(settings)
     # these points are in x, y, z order columnwise, in absolute pixels
     orig_centre = get_structure_centre(cell_points)
 
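Shallow-copying `settings` lets `split_cells` adjust filter parameters locally without mutating the object shared with the caller. A toy demonstration of the aliasing bug the copy prevents (`Settings` here is a stand-in, not cellfinder's real settings type):

    from copy import copy
    from dataclasses import dataclass

    @dataclass
    class Settings:
        ball_z_size: float = 15.0

    def split_cells(settings: Settings) -> None:
        settings = copy(settings)   # local copy, as in the change above
        settings.ball_z_size = 6.0  # safe: the caller's object is untouched

    s = Settings()
    split_cells(s)
    assert s.ball_z_size == 15.0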
cellfinder/core/detect/filters/volume/volume_filter.py CHANGED
@@ -140,7 +140,7 @@ class VolumeFilter:
         tensor = torch.empty(
             (batch_size, *self.settings.plane_shape),
             dtype=torch_dtype,
-            pin_memory=not cpu,
+            pin_memory=not cpu and self.settings.pin_memory,
             device="cpu",
         )
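Pinning is now opt-in: host memory is page-locked only when a GPU is the target and the user requested it, since pinned pages cannot be swapped out. A minimal sketch of the same allocation pattern (requires PyTorch; `make_batch` is a hypothetical helper):

    import torch

    def make_batch(batch_size, plane_shape, cpu: bool, pin_memory: bool):
        return torch.empty(
            (batch_size, *plane_shape),
            dtype=torch.float32,
            # page-locked memory enables faster, asynchronous copies to CUDA
            pin_memory=not cpu and pin_memory,
            device="cpu",
        )

    batch = make_batch(4, (64, 64), cpu=True, pin_memory=False)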
cellfinder/core/download/download.py CHANGED
@@ -6,6 +6,7 @@ import pooch
 from brainglobe_utils.general.config import get_config_obj
 
 from cellfinder import DEFAULT_CELLFINDER_DIRECTORY
+from cellfinder.core import logger
 from cellfinder.core.tools.source_files import (
     default_configuration_path,
     user_specific_configuration_path,
@@ -74,7 +75,7 @@ def amend_user_configuration(new_model_path=None) -> None:
     new_model_path : Path, optional
         The path to the new model configuration.
     """
-    print("(Over-)writing custom user configuration")
+    logger.info("(Over-)writing custom user configuration")
 
     original_config = default_configuration_path()
     new_config = user_specific_configuration_path()
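Routing this message through `cellfinder.core`'s logger instead of `print` hands control to the caller's logging configuration. Assuming it is a standard library `logging` logger (an assumption, not confirmed by this diff), enabling the message looks like:

    import logging

    # show INFO-level records, including the configuration notice above
    logging.basicConfig(level=logging.INFO)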
cellfinder/core/main.py CHANGED
@@ -1,34 +1,33 @@
 import os
 from typing import Callable, List, Optional, Tuple
 
-import numpy as np
 from brainglobe_utils.cells.cells import Cell
 
-from cellfinder.core import logger
+from cellfinder.core import logger, types
 from cellfinder.core.download.download import model_type
-from cellfinder.core.train.train_yml import depth_type
+from cellfinder.core.train.train_yaml import depth_type
 
 
 def main(
-    signal_array: np.ndarray,
-    background_array: np.ndarray,
-    voxel_sizes: Tuple[int, int, int],
+    signal_array: types.array,
+    background_array: types.array,
+    voxel_sizes: Tuple[float, float, float],
     start_plane: int = 0,
     end_plane: int = -1,
     trained_model: Optional[os.PathLike] = None,
     model_weights: Optional[os.PathLike] = None,
     model: model_type = "resnet50_tv",
-    batch_size: int = 64,
+    classification_batch_size: int = 64,
     n_free_cpus: int = 2,
-    network_voxel_sizes: Tuple[int, int, int] = (5, 1, 1),
-    soma_diameter: int = 16,
-    ball_xy_size: int = 6,
-    ball_z_size: int = 15,
+    network_voxel_sizes: Tuple[float, float, float] = (5, 1, 1),
+    soma_diameter: float = 16,
+    ball_xy_size: float = 6,
+    ball_z_size: float = 15,
     ball_overlap_fraction: float = 0.6,
     log_sigma_size: float = 0.2,
     n_sds_above_mean_thresh: float = 10,
     soma_spread_factor: float = 1.4,
-    max_cluster_size: int = 100000,
+    max_cluster_size: float = 100000,
     cube_width: int = 50,
     cube_height: int = 50,
     cube_depth: int = 20,
@@ -36,8 +35,15 @@ def main(
     skip_detection: bool = False,
     skip_classification: bool = False,
     detected_cells: List[Cell] = None,
-    classification_batch_size: Optional[int] = None,
-    classification_torch_device: str = "cpu",
+    detection_batch_size: Optional[int] = None,
+    torch_device: Optional[str] = None,
+    pin_memory: bool = False,
+    split_ball_xy_size: float = 6,
+    split_ball_z_size: float = 15,
+    split_ball_overlap_fraction: float = 0.8,
+    n_splitting_iter: int = 10,
+    n_sds_above_mean_tiled_thresh: float = 10,
+    tiled_thresh_tile_size: float | None = None,
     *,
     detect_callback: Optional[Callable[[int], None]] = None,
     classify_callback: Optional[Callable[[int], None]] = None,
@@ -46,6 +52,125 @@ def main(
     """
     Parameters
     ----------
+    signal_array : numpy.ndarray or dask array
+        3D array representing the signal data in z, y, x order.
+    background_array : numpy.ndarray or dask array
+        3D array representing the background data in z, y, x order.
+    voxel_sizes : 3-tuple of floats
+        Size of your voxels in the z, y, and x dimensions (microns).
+    start_plane : int
+        First plane index to process (inclusive, to process a subset of
+        the data).
+    end_plane : int
+        Last plane index to process (exclusive, to process a subset of
+        the data).
+    trained_model : Optional[Path]
+        Trained model file path (defaults to the pretrained weights in
+        the home directory).
+    model_weights : Optional[Path]
+        Model weights path (defaults to the pretrained weights in the
+        home directory).
+    model : str
+        Type of model to use. Defaults to `"resnet50_tv"`.
+    classification_batch_size : int
+        How many potential cells to classify at one time. The GPU/CPU
+        memory must be able to hold this many data cubes at once for
+        the models. For performance-critical applications, tune to
+        maximize memory usage without running out. Check your GPU/CPU
+        memory to verify it's not full.
+    n_free_cpus : int
+        How many CPU cores to leave free.
+    network_voxel_sizes : 3-tuple of floats
+        Size of the pre-trained network's voxels (microns) in the z, y,
+        and x dimensions.
+    soma_diameter : float
+        The expected in-plane (xy) soma diameter (microns).
+    ball_xy_size : float
+        3d filter's in-plane (xy) filter ball size (microns).
+    ball_z_size : float
+        3d filter's axial (z) filter ball size (microns).
+    ball_overlap_fraction : float
+        3d filter's fraction of the ball filter that must be filled by
+        foreground voxels, centered on a voxel, to retain the voxel.
+    log_sigma_size : float
+        Gaussian filter width (as a fraction of soma diameter) used
+        during 2d in-plane Laplacian of Gaussian filtering.
+    n_sds_above_mean_thresh : float
+        Per-plane intensity threshold (the number of standard deviations
+        above the mean) of the filtered 2d planes used to mark pixels as
+        foreground or background.
+    soma_spread_factor : float
+        Cell spread factor for determining the largest cell volume
+        before splitting up cell clusters. Structures with a spherical
+        volume of diameter `soma_spread_factor * soma_diameter` or less
+        will not be split.
+    max_cluster_size : float
+        Largest detected cell cluster (in cubic microns) for which
+        splitting should be attempted. Clusters above this size will be
+        labeled as artifacts.
+    cube_width : int
+        The width of the data cube centered on the cell used for
+        classification. Defaults to `50`.
+    cube_height : int
+        The height of the data cube centered on the cell used for
+        classification. Defaults to `50`.
+    cube_depth : int
+        The depth of the data cube centered on the cell used for
+        classification. Defaults to `20`.
+    network_depth : str
+        The network depth to use during classification. Defaults to
+        `"50"`.
+    skip_detection : bool
+        If selected, the detection step is skipped and the detected
+        cells are instead taken from the cell layer below (from a
+        previous detection run or import).
+    skip_classification : bool
+        If selected, the classification step is skipped and all cells
+        from the detection stage are added.
+    detected_cells : List[Cell], optional
+        If specified, the cells to use during classification.
+    detection_batch_size : int
+        The number of planes of the original data volume to process at
+        once. The GPU/CPU memory must be able to hold this many planes
+        for all the filters. For performance-critical applications,
+        tune to maximize memory usage without running out. Check your
+        GPU/CPU memory to verify it's not full.
+    torch_device : str, optional
+        The device on which to run the computation. If not specified
+        (None), "cuda" will be used if a GPU is available, otherwise
+        "cpu". You can also manually specify "cuda" or "cpu".
+    pin_memory : bool
+        Pins data to be sent to the GPU in CPU memory. This allows
+        faster GPU data transfer, but can only be used if the data used
+        by the GPU can stay in CPU RAM while the GPU uses it, i.e.
+        there's enough RAM. If there's a risk of the RAM being paged
+        out, it shouldn't be used. Defaults to False.
+    split_ball_xy_size : float
+        Similar to `ball_xy_size`, except the value to use for the 3d
+        filter during cluster splitting.
+    split_ball_z_size : float
+        Similar to `ball_z_size`, except the value to use for the 3d
+        filter during cluster splitting.
+    split_ball_overlap_fraction : float
+        Similar to `ball_overlap_fraction`, except the value to use for
+        the 3d filter during cluster splitting.
+    n_splitting_iter : int
+        The number of iterations to run the 3d filtering on a cluster.
+        Each iteration reduces the cluster size by the voxels not
+        retained in the previous iteration.
+    n_sds_above_mean_tiled_thresh : float
+        Per-plane, per-tile intensity threshold (the number of standard
+        deviations above the mean) for the filtered 2d planes used to
+        mark pixels as foreground or background. When enabled (tile
+        size is not zero or None), a pixel is marked as foreground if
+        its intensity is above both the per-plane and the per-tile
+        threshold, i.e. it's above the set number of standard
+        deviations of the per-plane average and of the per-plane,
+        per-tile average for the tile that contains it.
+    tiled_thresh_tile_size : float, optional
+        The tile size used to tile the x, y plane to calculate the
+        local average intensity for the tiled threshold. The value is
+        multiplied by the soma diameter (i.e. 1 means one soma
+        diameter). If zero or None, the tiled threshold is disabled and
+        only the per-plane threshold is used. Tiling is done with 50%
+        overlap when striding.
     detect_callback : Callable[int], optional
         Called every time a plane has finished being processed during the
         detection stage. Called with the plane number that has finished.
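The renames are easiest to see at the call site: `batch_size` is now `classification_batch_size`, `classification_torch_device` has become the general `torch_device`, and detection gets its own `detection_batch_size`. A hedged usage sketch with toy arrays standing in for real image stacks (assuming the `cellfinder.core.main.main` import path shown in this diff):

    import numpy as np
    from cellfinder.core.main import main

    # toy 16-bit stacks; real use would pass full-resolution data
    signal = np.random.randint(0, 2**16, (30, 128, 128)).astype(np.uint16)
    background = np.random.randint(0, 2**16, (30, 128, 128)).astype(np.uint16)

    cells = main(
        signal,
        background,
        voxel_sizes=(5, 2, 2),         # z, y, x in microns
        classification_batch_size=64,  # renamed from batch_size
        detection_batch_size=1,        # new: planes filtered per batch
        torch_device=None,             # new: auto-select "cuda" or "cpu"
        pin_memory=False,              # new: opt-in pinned host memory
    )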
@@ -63,22 +188,29 @@ def main(
     logger.info("Detecting cell candidates")
 
     points = detect.main(
-        signal_array,
-        start_plane,
-        end_plane,
-        voxel_sizes,
-        soma_diameter,
-        max_cluster_size,
-        ball_xy_size,
-        ball_z_size,
-        ball_overlap_fraction,
-        soma_spread_factor,
-        n_free_cpus,
-        log_sigma_size,
-        n_sds_above_mean_thresh,
-        batch_size=classification_batch_size,
-        torch_device=classification_torch_device,
+        signal_array=signal_array,
+        start_plane=start_plane,
+        end_plane=end_plane,
+        voxel_sizes=voxel_sizes,
+        soma_diameter=soma_diameter,
+        max_cluster_size=max_cluster_size,
+        ball_xy_size=ball_xy_size,
+        ball_z_size=ball_z_size,
+        ball_overlap_fraction=ball_overlap_fraction,
+        soma_spread_factor=soma_spread_factor,
+        n_free_cpus=n_free_cpus,
+        log_sigma_size=log_sigma_size,
+        n_sds_above_mean_thresh=n_sds_above_mean_thresh,
+        n_sds_above_mean_tiled_thresh=n_sds_above_mean_tiled_thresh,
+        tiled_thresh_tile_size=tiled_thresh_tile_size,
+        batch_size=detection_batch_size,
+        torch_device=torch_device,
+        pin_memory=pin_memory,
         callback=detect_callback,
+        split_ball_z_size=split_ball_z_size,
+        split_ball_xy_size=split_ball_xy_size,
+        split_ball_overlap_fraction=split_ball_overlap_fraction,
+        n_splitting_iter=n_splitting_iter,
     )
 
     if detect_finished_callback is not None:
@@ -101,7 +233,7 @@ def main(
             n_free_cpus,
             voxel_sizes,
             network_voxel_sizes,
-            batch_size,
+            classification_batch_size,
             cube_height,
             cube_width,
             cube_depth,
cellfinder/core/tools/threading.py CHANGED
@@ -15,6 +15,7 @@ Typical example::
 
     from cellfinder.core.tools.threading import ThreadWithException, \\
         EOFSignal, ProcessWithException
+    from cellfinder.core import logger
     import torch
 
 
@@ -63,7 +64,7 @@ Typical example::
             # thread exited for whatever reason (not exception)
             break
 
-        print(f"Thread processed tensor {i}")
+        logger.debug(f"Thread processed tensor {i}")
     finally:
         # whatever happens, make sure thread is told to finish so it
         # doesn't get stuck
@@ -248,8 +249,8 @@ class ExceptionWithQueueMixIn:
     ...         # do something with the msg
     ...         pass
     ...     except ExecutionFailure as e:
-    ...         print(f"got exception {type(e.__cause__)}")
-    ...         print(f"with message {e.__cause__.args[0]}")
+    ...         logger.error(f"got exception {type(e.__cause__)}")
+    ...         logger.error(f"with message {e.__cause__.args[0]}")
     """
     msg, value = self.from_thread_queue.get(block=True, timeout=timeout)
     if msg == "eof":
cellfinder/core/tools/tools.py CHANGED
@@ -261,7 +261,7 @@ def random_bool(likelihood: Optional[float] = None) -> bool:
     if likelihood is None:
         return bool(getrandbits(1))
     else:
-        if uniform(0, 1) > likelihood:
+        if uniform(0, 1) < likelihood:
             return True
         else:
             return False
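This one-character fix inverts the old behaviour: `uniform(0, 1) < likelihood` is True with probability `likelihood`, whereas the previous `>` comparison returned True with probability `1 - likelihood`. A quick empirical check of the corrected semantics:

    from random import uniform

    def random_bool(likelihood: float) -> bool:
        return uniform(0, 1) < likelihood

    n = 100_000
    hits = sum(random_bool(0.2) for _ in range(n))
    print(hits / n)  # ~0.2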
cellfinder/core/train/{train_yml.py → train_yaml.py} RENAMED
@@ -3,9 +3,6 @@ main
 ===============
 
 Trains a network based on a yaml file specifying cubes of cells/non cells.
-
-N.B imports are within functions to prevent tensorflow being imported before
-it's warnings are silenced
 """
 
 import os
@@ -29,12 +26,16 @@ from brainglobe_utils.general.system import (
 from brainglobe_utils.IO.cells import find_relevant_tiffs
 from brainglobe_utils.IO.yaml import read_yaml_section
 from fancylog import fancylog
+from keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard
 from sklearn.model_selection import train_test_split
 
-import cellfinder.core as program_for_log
+import cellfinder.core as package_for_log
 from cellfinder.core import logger
+from cellfinder.core.classify.cube_generator import CubeGeneratorFromDisk
 from cellfinder.core.classify.resnet import layer_type
+from cellfinder.core.classify.tools import get_model, make_lists
 from cellfinder.core.download.download import DEFAULT_DOWNLOAD_DIRECTORY
+from cellfinder.core.tools.prep import prep_model_weights
 
 depth_type = Literal["18", "34", "50", "101", "152"]
 
@@ -268,7 +269,7 @@ def cli():
 
     fancylog.start_logging(
         args.output_dir,
-        program_for_log,
+        package=package_for_log,
         variables=[args],
         log_header="CELLFINDER TRAINING LOG",
     )
@@ -316,16 +317,6 @@ def run(
     save_progress=False,
     epochs=100,
 ):
-    from keras.callbacks import (
-        CSVLogger,
-        ModelCheckpoint,
-        TensorBoard,
-    )
-
-    from cellfinder.core.classify.cube_generator import CubeGeneratorFromDisk
-    from cellfinder.core.classify.tools import get_model, make_lists
-    from cellfinder.core.tools.prep import prep_model_weights
-
     start_time = datetime.now()
 
     ensure_directory_exists(output_dir)
cellfinder/napari/curation.py CHANGED
@@ -8,11 +8,11 @@ from brainglobe_napari_io.cellfinder.utils import convert_layer_to_cells
 from brainglobe_utils.cells.cells import Cell
 from brainglobe_utils.general.system import delete_directory_contents
 from brainglobe_utils.IO.yaml import save_yaml
-from brainglobe_utils.qtpy.dialog import display_warning
-from brainglobe_utils.qtpy.interaction import add_button, add_combobox
 from magicgui.widgets import ProgressBar
 from napari.qt.threading import thread_worker
 from napari.utils.notifications import show_info
+from qt_niu.dialog import display_info, display_warning
+from qt_niu.interaction import add_button, add_combobox
 from qtpy import QtCore
 from qtpy.QtWidgets import (
     QComboBox,
@@ -95,6 +95,22 @@ class CurationWidget(QWidget):
             self.training_data_non_cell_choice, self.point_layer_names
         )
 
+        @self.viewer.layers.events.removed.connect
+        def _remove_selection_layers(event: QtCore.QEvent):
+            """
+            Set internal background, signal, training data cell,
+            and training data non-cell layers to None when they
+            are removed from the napari viewer GUI.
+            """
+            if event.value == self.signal_layer:
+                self.signal_layer = None
+            if event.value == self.background_layer:
+                self.background_layer = None
+            if event.value == self.training_data_cell_layer:
+                self.training_data_cell_layer = None
+            if event.value == self.training_data_non_cell_layer:
+                self.training_data_non_cell_layer = None
+
     @staticmethod
     def _update_combobox_options(combobox: QComboBox, options_list: List[str]):
         original_text = combobox.currentText()
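The handler above is registered by using the event's `connect` method as a decorator, which works because `connect` returns the callback it was given. A self-contained toy emitter showing the pattern (napari's event emitters behave analogously; this is not napari's implementation):

    from typing import Callable, List

    class Event:
        def __init__(self, value):
            self.value = value

    class Emitter:
        def __init__(self):
            self._callbacks: List[Callable] = []

        def connect(self, callback: Callable) -> Callable:
            # returning the callback makes `@emitter.connect` valid syntax
            self._callbacks.append(callback)
            return callback

        def emit(self, value):
            for cb in self._callbacks:
                cb(Event(value))

    removed = Emitter()

    @removed.connect
    def _on_removed(event: Event):
        print(f"layer removed: {event.value!r}")

    removed.emit("signal_layer")  # -> layer removed: 'signal_layer'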
@@ -212,8 +228,8 @@ class CurationWidget(QWidget):
         self.layout.addWidget(self.load_data_panel, row, column, 1, 1)
 
     def setup_keybindings(self):
-        self.viewer.bind_key("c", self.mark_as_cell)
-        self.viewer.bind_key("x", self.mark_as_non_cell)
+        self.viewer.bind_key("c", self.mark_as_cell, overwrite=True)
+        self.viewer.bind_key("x", self.mark_as_non_cell, overwrite=True)
 
     def set_signal_image(self):
         """
@@ -406,7 +422,7 @@ class CurationWidget(QWidget):
         self.update_status_label("Ready")
 
     def __prep_directories_for_save(self):
-        self.yaml_filename = self.output_directory / "training.yml"
+        self.yaml_filename = self.output_directory / "training.yaml"
         self.cell_cube_dir = self.output_directory / "cells"
         self.no_cell_cube_dir = self.output_directory / "non_cells"
 
@@ -479,26 +495,61 @@ class CurationWidget(QWidget):
             return False
 
     def check_training_data_exists(self) -> bool:
-        if not (
-            self.training_data_cell_layer or self.training_data_non_cell_layer
-        ):
-            show_info(
-                "No training data layers have been added. "
-                "Please add a layer and annotate some points.",
+        """
+        Checks that
+        - both training data layers exist
+        - at least one of them is not empty.
+
+        Will display a popup dialog and return False if these conditions
+        are not both fulfilled.
+
+        Will show a notification if only one layer is non-empty, but this
+        is considered valid.
+
+        Returns
+        -------
+        bool
+            True if both training layers exist and at least one
+            of them contains some data. False otherwise.
+        """
+        both_training_layers_exist = (
+            self.training_data_cell_layer and self.training_data_non_cell_layer
+        )
+
+        if not both_training_layers_exist:
+            display_info(
+                self,
+                "No training data layers have been added.",
+                "Please add layers for both cells and non-cells, "
+                "and annotate some points.",
             )
             return False
-        else:
-            if (
-                len(self.training_data_cell_layer.data) > 0
-                or len(self.training_data_non_cell_layer.data) > 0
-            ):
-                return True
-            else:
+
+        at_least_one_training_layer_contains_data = (
+            len(self.training_data_cell_layer.data) > 0
+            or len(self.training_data_non_cell_layer.data) > 0
+        )
+
+        both_training_layers_contain_data = (
+            len(self.training_data_cell_layer.data) > 0
+            and len(self.training_data_non_cell_layer.data) > 0
+        )
+
+        if at_least_one_training_layer_contains_data:
+            if not both_training_layers_contain_data:
                 show_info(
-                    "No training data points have been added. "
-                    "Please annotate some points.",
+                    "One of the training layers is empty. This is OK, but "
+                    "for optimal (re-)training ensure you have a roughly "
+                    "equal number of points in each of your training "
+                    "points layers."
                 )
-                return False
+            return True
+        else:
+            display_info(
+                self,
+                "No training data points have been added.",
+                "Please annotate points in the training data layers.",
+            )
+            return False
 
     def get_output_directory(self):
         """