cellfinder 1.7.0__tar.gz → 1.8.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (73)
  1. {cellfinder-1.7.0 → cellfinder-1.8.0}/PKG-INFO +2 -2
  2. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/classify.py +48 -2
  3. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/detect.py +60 -51
  4. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/plane/plane_filter.py +1 -1
  5. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/setup_filters.py +31 -12
  6. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/ball_filter.py +5 -5
  7. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/structure_splitting.py +2 -0
  8. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/volume_filter.py +1 -1
  9. cellfinder-1.8.0/cellfinder/core/main.py +229 -0
  10. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/curation.py +2 -2
  11. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/detect.py +58 -26
  12. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/detect_containers.py +19 -6
  13. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/PKG-INFO +2 -2
  14. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/requires.txt +1 -1
  15. {cellfinder-1.7.0 → cellfinder-1.8.0}/pyproject.toml +1 -1
  16. cellfinder-1.7.0/cellfinder/core/main.py +0 -115
  17. {cellfinder-1.7.0 → cellfinder-1.8.0}/.github/workflows/test_and_deploy.yml +0 -0
  18. {cellfinder-1.7.0 → cellfinder-1.8.0}/.github/workflows/test_include_guard.yaml +0 -0
  19. {cellfinder-1.7.0 → cellfinder-1.8.0}/.gitignore +0 -0
  20. {cellfinder-1.7.0 → cellfinder-1.8.0}/.napari/config.yml +0 -0
  21. {cellfinder-1.7.0 → cellfinder-1.8.0}/CITATION.cff +0 -0
  22. {cellfinder-1.7.0 → cellfinder-1.8.0}/LICENSE +0 -0
  23. {cellfinder-1.7.0 → cellfinder-1.8.0}/MANIFEST.in +0 -0
  24. {cellfinder-1.7.0 → cellfinder-1.8.0}/README.md +0 -0
  25. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/__init__.py +0 -0
  26. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/cli_migration_warning.py +0 -0
  27. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/__init__.py +0 -0
  28. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/__init__.py +0 -0
  29. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/augment.py +0 -0
  30. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/cube_generator.py +0 -0
  31. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/resnet.py +0 -0
  32. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/tools.py +0 -0
  33. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/config/__init__.py +0 -0
  34. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/config/cellfinder.conf +0 -0
  35. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/__init__.py +0 -0
  36. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/__init__.py +0 -0
  37. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/plane/__init__.py +0 -0
  38. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/plane/classical_filter.py +0 -0
  39. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/plane/tile_walker.py +0 -0
  40. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/__init__.py +0 -0
  41. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/structure_detection.py +0 -0
  42. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/download/__init__.py +0 -0
  43. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/download/cli.py +0 -0
  44. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/download/download.py +0 -0
  45. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/IO.py +0 -0
  46. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/__init__.py +0 -0
  47. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/array_operations.py +0 -0
  48. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/geometry.py +0 -0
  49. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/image_processing.py +0 -0
  50. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/prep.py +0 -0
  51. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/source_files.py +0 -0
  52. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/system.py +0 -0
  53. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/threading.py +0 -0
  54. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/tiff.py +0 -0
  55. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/tools/tools.py +0 -0
  56. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/train/__init__.py +0 -0
  57. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/train/train_yaml.py +0 -0
  58. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/types.py +0 -0
  59. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/__init__.py +0 -0
  60. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/__init__.py +0 -0
  61. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/thread_worker.py +0 -0
  62. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/input_container.py +0 -0
  63. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/napari.yaml +0 -0
  64. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/sample_data.py +0 -0
  65. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/train/__init__.py +0 -0
  66. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/train/train.py +0 -0
  67. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/train/train_containers.py +0 -0
  68. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/utils.py +0 -0
  69. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/SOURCES.txt +0 -0
  70. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/dependency_links.txt +0 -0
  71. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/entry_points.txt +0 -0
  72. {cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/top_level.txt +0 -0
  73. {cellfinder-1.7.0 → cellfinder-1.8.0}/setup.cfg +0 -0

{cellfinder-1.7.0 → cellfinder-1.8.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cellfinder
- Version: 1.7.0
+ Version: 1.8.0
  Summary: Automated 3D cell detection in large microscopy images
  Author-email: "Adam Tyson, Christian Niedworok, Charly Rousseau" <code@adamltyson.com>
  License: BSD-3-Clause
@@ -53,7 +53,7 @@ Requires-Dist: brainglobe-napari-io; extra == "napari"
  Requires-Dist: magicgui; extra == "napari"
  Requires-Dist: napari-ndtiffs; extra == "napari"
  Requires-Dist: napari-plugin-engine>=0.1.4; extra == "napari"
- Requires-Dist: napari[pyqt5]; extra == "napari"
+ Requires-Dist: napari[pyqt5]>=0.6.1; extra == "napari"
  Requires-Dist: pooch>=1; extra == "napari"
  Requires-Dist: qtpy; extra == "napari"
  Dynamic: license-file

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/classify/classify.py
@@ -19,8 +19,8 @@ def main(
  signal_array: types.array,
  background_array: types.array,
  n_free_cpus: int,
- voxel_sizes: Tuple[int, int, int],
- network_voxel_sizes: Tuple[int, int, int],
+ voxel_sizes: Tuple[float, float, float],
+ network_voxel_sizes: Tuple[float, float, float],
  batch_size: int,
  cube_height: int,
  cube_width: int,
@@ -29,12 +29,58 @@ def main(
  model_weights: Optional[os.PathLike],
  network_depth: depth_type,
  max_workers: int = 3,
+ pin_memory: bool = False,
  *,
  callback: Optional[Callable[[int], None]] = None,
  ) -> List[Cell]:
  """
  Parameters
  ----------
+
+ points: List of Cell objects
+ The potential cells to classify.
+ signal_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ background_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ n_free_cpus : int
+ How many CPU cores to leave free.
+ voxel_sizes : 3-tuple of floats
+ Size of your voxels in the z, y, and x dimensions.
+ network_voxel_sizes : 3-tuple of floats
+ Size of the pre-trained network's voxels in the z, y, and x dimensions.
+ batch_size : int
+ How many potential cells to classify at one time. The GPU/CPU
+ memory must be able to contain at once this many data cubes for
+ the models. For performance-critical applications, tune to maximize
+ memory usage without running out. Check your GPU/CPU memory to verify
+ it's not full.
+ cube_height: int
+ The height of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_width: int
+ The width of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_depth: int
+ The depth of the data cube centered on the cell used for
+ classification. Defaults to `20`.
+ trained_model : Optional[Path]
+ Trained model file path (home directory (default) -> pretrained
+ weights).
+ model_weights : Optional[Path]
+ Model weights path (home directory (default) -> pretrained
+ weights).
+ network_depth: str
+ The network depth to use during classification. Defaults to `"50"`.
+ max_workers: int
+ The number of sub-processes to use for data loading / processing.
+ Defaults to 8.
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows faster
+ GPU data speeds, but can only be used if the data used by the GPU can
+ stay in the CPU RAM while the GPU uses it. I.e. there's enough RAM.
+ Otherwise, if there's a risk of the RAM being paged, it shouldn't be
+ used. Defaults to False.
  callback : Callable[int], optional
  A callback function that is called during classification. Called with
  the batch number once that batch has been classified.
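
For orientation, a minimal sketch of calling the updated classification entry point directly; it mirrors the call made in the new cellfinder/core/main.py later in this diff. The volume shapes, voxel sizes and the single placeholder candidate are assumptions for illustration, and pin_memory is the new 1.8.0 keyword:

    import numpy as np
    from brainglobe_utils.cells.cells import Cell
    from cellfinder.core.classify import classify
    from cellfinder.core.tools import prep

    # Placeholder volumes and a single fake candidate; real inputs come
    # from your acquisition and from detect.main().
    signal = np.random.random((30, 256, 256)).astype(np.float32)
    background = np.random.random((30, 256, 256)).astype(np.float32)
    candidates = [Cell((128, 128, 15), Cell.UNKNOWN)]

    # Downloads the default pretrained weights on first use.
    weights = prep.prep_model_weights(None, None, "resnet50_tv")

    classified = classify.main(
        candidates,
        signal,
        background,
        n_free_cpus=2,
        voxel_sizes=(5.0, 2.0, 2.0),
        network_voxel_sizes=(5.0, 1.0, 1.0),
        batch_size=32,
        cube_height=50,
        cube_width=50,
        cube_depth=20,
        trained_model=None,
        model_weights=weights,
        network_depth="50",
        pin_memory=False,  # new in 1.8.0; enable only with spare host RAM
    )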

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/detect.py
@@ -49,10 +49,11 @@ def main(
  plane_directory: Optional[str] = None,
  batch_size: Optional[int] = None,
  torch_device: Optional[str] = None,
- split_ball_xy_size: int = 3,
- split_ball_z_size: int = 3,
+ pin_memory: bool = False,
+ split_ball_xy_size: float = 6,
+ split_ball_z_size: float = 15,
  split_ball_overlap_fraction: float = 0.8,
- split_soma_diameter: int = 7,
+ n_splitting_iter: int = 10,
  *,
  callback: Optional[Callable[[int], None]] = None,
  ) -> List[Cell]:
@@ -61,69 +62,80 @@ def main(

  Parameters
  ----------
- signal_array : numpy.ndarray
- 3D array representing the signal data.
-
+ signal_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
  start_plane : int
- Index of the starting plane for detection.
-
+ First plane index to process (inclusive, to process a subset of the
+ data).
  end_plane : int
- Index of the ending plane for detection.
-
- voxel_sizes : Tuple[float, float, float]
- Tuple of voxel sizes in each dimension (z, y, x).
-
+ Last plane index to process (exclusive, to process a subset of the
+ data).
+ voxel_sizes : 3-tuple of floats
+ Size of your voxels in the z, y, and x dimensions (microns).
  soma_diameter : float
- Diameter of the soma in physical units.
-
+ The expected in-plane (xy) soma diameter (microns).
  max_cluster_size : float
- Maximum size of a cluster in physical units.
-
+ Largest detected cell cluster (in cubic um) where splitting
+ should be attempted. Clusters above this size will be labeled
+ as artifacts.
  ball_xy_size : float
- Size of the XY ball used for filtering in physical units.
-
+ 3d filter's in-plane (xy) filter ball size (microns).
  ball_z_size : float
- Size of the Z ball used for filtering in physical units.
-
+ 3d filter's axial (z) filter ball size (microns).
  ball_overlap_fraction : float
- Fraction of overlap allowed between balls.
-
+ 3d filter's fraction of the ball filter needed to be filled by
+ foreground voxels, centered on a voxel, to retain the voxel.
  soma_spread_factor : float
- Spread factor for soma size.
-
+ Cell spread factor for determining the largest cell volume before
+ splitting up cell clusters. Structures with spherical volume of
+ diameter `soma_spread_factor * soma_diameter` or less will not be
+ split.
  n_free_cpus : int
- Number of free CPU cores available for parallel processing.
-
+ How many CPU cores to leave free.
  log_sigma_size : float
- Size of the sigma for the log filter.
-
+ Gaussian filter width (as a fraction of soma diameter) used during
+ 2d in-plane Laplacian of Gaussian filtering.
  n_sds_above_mean_thresh : float
- Number of standard deviations above the mean threshold.
-
+ Intensity threshold (the number of standard deviations above
+ the mean) of the filtered 2d planes used to mark pixels as
+ foreground or background.
  outlier_keep : bool, optional
  Whether to keep outliers during detection. Defaults to False.
-
  artifact_keep : bool, optional
  Whether to keep artifacts during detection. Defaults to False.
-
  save_planes : bool, optional
  Whether to save the planes during detection. Defaults to False.
-
  plane_directory : str, optional
  Directory path to save the planes. Defaults to None.
-
- batch_size : int, optional
- The number of planes to process in each batch. Defaults to 1.
- For CPU, there's no benefit for a larger batch size. Only a memory
- usage increase. For CUDA, the larger the batch size the better the
- performance. Until it fills up the GPU memory - after which it
- becomes slower.
-
+ batch_size: int
+ The number of planes of the original data volume to process at
+ once. The GPU/CPU memory must be able to contain this many planes
+ for all the filters. For performance-critical applications, tune to
+ maximize memory usage without running out. Check your GPU/CPU memory
+ to verify it's not full.
  torch_device : str, optional
  The device on which to run the computation. If not specified (None),
  "cuda" will be used if a GPU is available, otherwise "cpu".
  You can also manually specify "cuda" or "cpu".
-
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows faster
+ GPU data speeds, but can only be used if the data used by the GPU can
+ stay in the CPU RAM while the GPU uses it. I.e. there's enough RAM.
+ Otherwise, if there's a risk of the RAM being paged, it shouldn't be
+ used. Defaults to False.
+ split_ball_xy_size: float
+ Similar to `ball_xy_size`, except the value to use for the 3d
+ filter during cluster splitting.
+ split_ball_z_size: float
+ Similar to `ball_z_size`, except the value to use for the 3d filter
+ during cluster splitting.
+ split_ball_overlap_fraction: float
+ Similar to `ball_overlap_fraction`, except the value to use for the
+ 3d filter during cluster splitting.
+ n_splitting_iter: int
+ The number of iterations to run the 3d filtering on a cluster. Each
+ iteration reduces the cluster size by the voxels not retained in
+ the previous iteration.
  callback : Callable[int], optional
  A callback function that is called every time a plane has finished
  being processed. Called with the plane number that has finished.
@@ -131,7 +143,7 @@ def main(
  Returns
  -------
  List[Cell]
- List of detected cells.
+ List of detected cell candidates.
  """
  start_time = datetime.now()
  if torch_device is None:
@@ -187,19 +199,16 @@ def main(
  plane_directory=plane_directory,
  batch_size=batch_size,
  torch_device=torch_device,
+ pin_memory=pin_memory,
+ n_splitting_iter=n_splitting_iter,
  )

  # replicate the settings specific to splitting, before we access anything
  # of the original settings, causing cached properties
  kwargs = dataclasses.asdict(settings)
- kwargs["ball_z_size_um"] = split_ball_z_size * settings.z_pixel_size
- kwargs["ball_xy_size_um"] = (
- split_ball_xy_size * settings.in_plane_pixel_size
- )
+ kwargs["ball_z_size_um"] = split_ball_z_size
+ kwargs["ball_xy_size_um"] = split_ball_xy_size
  kwargs["ball_overlap_fraction"] = split_ball_overlap_fraction
- kwargs["soma_diameter_um"] = (
- split_soma_diameter * settings.in_plane_pixel_size
- )
  # always run on cpu because copying to gpu overhead is likely slower than
  # any benefit for detection on smallish volumes
  kwargs["torch_device"] = "cpu"
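
The cluster-splitting parameters are now given in microns rather than voxels, and `split_soma_diameter` is gone in favour of `n_splitting_iter`. A minimal sketch of calling the updated detection entry point with these keywords, with the positional order taken from the call in the new cellfinder/core/main.py further down this diff; the array and parameter values below are placeholder assumptions:

    import numpy as np
    from cellfinder.core.detect import detect

    # Placeholder volume; a real signal channel would be loaded from disk.
    signal = np.random.random((30, 256, 256)).astype(np.float32)

    candidates = detect.main(
        signal,
        0,                # start_plane
        -1,               # end_plane
        (5.0, 2.0, 2.0),  # voxel_sizes (z, y, x) in microns
        16.0,             # soma_diameter (microns)
        100_000.0,        # max_cluster_size (cubic microns)
        6.0,              # ball_xy_size (microns)
        15.0,             # ball_z_size (microns)
        0.6,              # ball_overlap_fraction
        1.4,              # soma_spread_factor
        2,                # n_free_cpus
        0.2,              # log_sigma_size
        10.0,             # n_sds_above_mean_thresh
        batch_size=1,
        torch_device="cpu",
        pin_memory=False,           # new in 1.8.0
        split_ball_xy_size=6.0,     # now microns (was voxels in 1.7.0)
        split_ball_z_size=15.0,     # now microns (was voxels in 1.7.0)
        split_ball_overlap_fraction=0.8,
        n_splitting_iter=10,        # replaces split_soma_diameter
    )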

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/plane/plane_filter.py
@@ -39,7 +39,7 @@ class TileProcessor:
  Number of standard deviations above the mean threshold to use for
  determining whether a voxel is bright.
  log_sigma_size : float
- Size of the sigma for the gaussian filter.
+ Size of the Gaussian sigma for the Laplacian of Gaussian filtering.
  soma_diameter : float
  Diameter of the soma in voxels.
  torch_device: str

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/setup_filters.py
@@ -80,23 +80,28 @@ class DetectionSettings:

  voxel_sizes: Tuple[float, float, float] = (1.0, 1.0, 1.0)
  """
- Tuple of voxel sizes in each dimension (z, y, x). We use this to convert
- from `um` to pixel sizes.
+ Tuple of voxel sizes (microns) in each dimension (z, y, x). We use this
+ to convert from `um` to pixel sizes.
  """

  soma_spread_factor: float = 1.4
- """Spread factor for soma size - how much it may stretch in the images."""
+ """
+ Cell spread factor for determining the largest cell volume before
+ splitting up cell clusters. Structures with spherical volume of
+ diameter `soma_spread_factor * soma_diameter` or less will not be
+ split.
+ """

  soma_diameter_um: float = 16
  """
- Diameter of a typical soma in um. Bright areas larger than this will be
- split.
+ Diameter of a typical soma in-plane (xy) in microns.
  """

  max_cluster_size_um3: float = 100_000
  """
- Maximum size of a cluster (bright area) that will be processed, in um.
- Larger bright areas are skipped as artifacts.
+ Largest detected cell cluster (in cubic um) where splitting
+ should be attempted. Clusters above this size will be labeled
+ as artifacts.
  """

  ball_xy_size_um: float = 6
@@ -116,17 +121,21 @@ class DetectionSettings:

  ball_overlap_fraction: float = 0.6
  """
- Fraction of overlap between a bright area and the spherical kernel,
- for the area to be considered a single ball.
+ Fraction of the 3d ball filter needed to be filled by foreground voxels,
+ centered on a voxel, to retain the voxel.
  """

  log_sigma_size: float = 0.2
- """Size of the sigma for the 2d Gaussian filter."""
+ """
+ Gaussian filter width (as a fraction of soma diameter) used during
+ 2d in-plane Laplacian of Gaussian filtering.
+ """

  n_sds_above_mean_thresh: float = 10
  """
- Number of standard deviations above the mean intensity to use for a
- threshold to define bright areas. Below it, it's not considered bright.
+ Intensity threshold (the number of standard deviations above
+ the mean) of the filtered 2d planes used to mark pixels as
+ foreground or background.
  """

  outlier_keep: bool = False
@@ -180,6 +189,14 @@ class DetectionSettings:
  to run on the first GPU.
  """

+ pin_memory: bool = False
+ """
+ Pins data to be sent to the GPU to the CPU memory. This allows faster GPU
+ data speeds, but can only be used if the data used by the GPU can stay in
+ the CPU RAM while the GPU uses it. I.e. there's enough RAM. Otherwise, if
+ there's a risk of the RAM being paged, it shouldn't be used.
+ """
+
  n_free_cpus: int = 2
  """
  Number of free CPU cores to keep available and not use during parallel
@@ -191,6 +208,8 @@ class DetectionSettings:
  """
  During the structure splitting phase we iteratively shrink the bright areas
  and re-filter with the 3d filter. This is the number of iterations to do.
+ Each iteration reduces the cluster size by the voxels not retained in the
+ previous iteration.

  This is a maximum because we also stop if there are no more structures left
  during any iteration.
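
The detect.py hunk earlier in this diff clones these settings before overriding the splitting-specific fields via `dataclasses.asdict`. A standalone sketch of that pattern, using a simplified stand-in dataclass rather than the real `DetectionSettings` (whose full field list is not shown here):

    import dataclasses


    @dataclasses.dataclass
    class ToySettings:
        # Stand-in for DetectionSettings; only a few illustrative fields.
        ball_xy_size_um: float = 6.0
        ball_z_size_um: float = 15.0
        ball_overlap_fraction: float = 0.6
        torch_device: str = "cuda"


    main_settings = ToySettings()

    # Dump the existing settings to a dict, override the splitting-specific
    # values, then build a fresh, independent settings object from it.
    kwargs = dataclasses.asdict(main_settings)
    kwargs["ball_xy_size_um"] = 6.0       # split_ball_xy_size
    kwargs["ball_z_size_um"] = 15.0       # split_ball_z_size
    kwargs["ball_overlap_fraction"] = 0.8
    kwargs["torch_device"] = "cpu"        # splitting always runs on CPU
    split_settings = ToySettings(**kwargs)

    print(main_settings)   # unchanged
    print(split_settings)  # carries the splitting overrides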

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/ball_filter.py
@@ -78,11 +78,11 @@ class BallFilter:
  ----------
  plane_height, plane_width : int
  Height/width of the planes.
- ball_xy_size : int
- Diameter of the spherical kernel in the x/y dimensions.
- ball_z_size : int
- Diameter of the spherical kernel in the z dimension.
- Equal to the number of planes stacked to filter
+ ball_xy_size : float
+ Diameter of the spherical kernel (in microns) in the x/y dimensions.
+ ball_z_size : float
+ Diameter of the spherical kernel in the z dimension in microns.
+ Determines the number of planes stacked to filter
  the central plane of the stack.
  overlap_fraction : float
  The fraction of pixels within the spherical kernel that

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/structure_splitting.py
@@ -1,3 +1,4 @@
+ from copy import copy
  from typing import List, Tuple, Type

  import numpy as np
@@ -224,6 +225,7 @@ def split_cells(
  where M is the number of individual cells and each centre is
  represented by its x, y, and z coordinates.
  """
+ settings = copy(settings)
  # these points are in x, y, z order columnwise, in absolute pixels
  orig_centre = get_structure_centre(cell_points)


{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/core/detect/filters/volume/volume_filter.py
@@ -140,7 +140,7 @@ class VolumeFilter:
  tensor = torch.empty(
  (batch_size, *self.settings.plane_shape),
  dtype=torch_dtype,
- pin_memory=not cpu,
+ pin_memory=not cpu and self.settings.pin_memory,
  device="cpu",
  )

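
The change above makes pinned (page-locked) host memory opt-in via the new pin_memory setting instead of always on. A small PyTorch sketch, independent of cellfinder, of what pinning buys:

    import torch

    # Allocate the host-side staging buffer; pin it only when a GPU exists.
    batch = torch.empty(
        (4, 512, 512),
        dtype=torch.float32,
        device="cpu",
        pin_memory=torch.cuda.is_available(),
    )

    if torch.cuda.is_available():
        # Copies from pinned host memory can overlap with GPU compute;
        # non_blocking only has an effect when the source tensor is pinned.
        on_gpu = batch.to("cuda", non_blocking=True)
        torch.cuda.synchronize()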

cellfinder-1.8.0/cellfinder/core/main.py
@@ -0,0 +1,229 @@
+ import os
+ from typing import Callable, List, Optional, Tuple
+
+ from brainglobe_utils.cells.cells import Cell
+
+ from cellfinder.core import logger, types
+ from cellfinder.core.download.download import model_type
+ from cellfinder.core.train.train_yaml import depth_type
+
+
+ def main(
+ signal_array: types.array,
+ background_array: types.array,
+ voxel_sizes: Tuple[float, float, float],
+ start_plane: int = 0,
+ end_plane: int = -1,
+ trained_model: Optional[os.PathLike] = None,
+ model_weights: Optional[os.PathLike] = None,
+ model: model_type = "resnet50_tv",
+ classification_batch_size: int = 64,
+ n_free_cpus: int = 2,
+ network_voxel_sizes: Tuple[float, float, float] = (5, 1, 1),
+ soma_diameter: float = 16,
+ ball_xy_size: float = 6,
+ ball_z_size: float = 15,
+ ball_overlap_fraction: float = 0.6,
+ log_sigma_size: float = 0.2,
+ n_sds_above_mean_thresh: float = 10,
+ soma_spread_factor: float = 1.4,
+ max_cluster_size: float = 100000,
+ cube_width: int = 50,
+ cube_height: int = 50,
+ cube_depth: int = 20,
+ network_depth: depth_type = "50",
+ skip_detection: bool = False,
+ skip_classification: bool = False,
+ detected_cells: List[Cell] = None,
+ detection_batch_size: Optional[int] = None,
+ torch_device: Optional[str] = None,
+ pin_memory: bool = False,
+ split_ball_xy_size: float = 6,
+ split_ball_z_size: float = 15,
+ split_ball_overlap_fraction: float = 0.8,
+ n_splitting_iter: int = 10,
+ *,
+ detect_callback: Optional[Callable[[int], None]] = None,
+ classify_callback: Optional[Callable[[int], None]] = None,
+ detect_finished_callback: Optional[Callable[[list], None]] = None,
+ ) -> List[Cell]:
+ """
+ Parameters
+ ----------
+ signal_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ background_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ voxel_sizes : 3-tuple of floats
+ Size of your voxels in the z, y, and x dimensions (microns).
+ start_plane : int
+ First plane index to process (inclusive, to process a subset of the
+ data).
+ end_plane : int
+ Last plane index to process (exclusive, to process a subset of the
+ data).
+ trained_model : Optional[Path]
+ Trained model file path (home directory (default) -> pretrained
+ weights).
+ model_weights : Optional[Path]
+ Model weights path (home directory (default) -> pretrained
+ weights).
+ model: str
+ Type of model to use. Defaults to `"resnet50_tv"`.
+ classification_batch_size : int
+ How many potential cells to classify at one time. The GPU/CPU
+ memory must be able to contain at once this many data cubes for
+ the models. For performance-critical applications, tune to maximize
+ memory usage without running out. Check your GPU/CPU memory to verify
+ it's not full.
+ n_free_cpus : int
+ How many CPU cores to leave free.
+ network_voxel_sizes : 3-tuple of floats
+ Size of the pre-trained network's voxels (microns) in the z, y, and x
+ dimensions.
+ soma_diameter : float
+ The expected in-plane (xy) soma diameter (microns).
+ ball_xy_size : float
+ 3d filter's in-plane (xy) filter ball size (microns).
+ ball_z_size : float
+ 3d filter's axial (z) filter ball size (microns).
+ ball_overlap_fraction : float
+ 3d filter's fraction of the ball filter needed to be filled by
+ foreground voxels, centered on a voxel, to retain the voxel.
+ log_sigma_size : float
+ Gaussian filter width (as a fraction of soma diameter) used during
+ 2d in-plane Laplacian of Gaussian filtering.
+ n_sds_above_mean_thresh : float
+ Intensity threshold (the number of standard deviations above
+ the mean) of the filtered 2d planes used to mark pixels as
+ foreground or background.
+ soma_spread_factor : float
+ Cell spread factor for determining the largest cell volume before
+ splitting up cell clusters. Structures with spherical volume of
+ diameter `soma_spread_factor * soma_diameter` or less will not be
+ split.
+ max_cluster_size : float
+ Largest detected cell cluster (in cubic um) where splitting
+ should be attempted. Clusters above this size will be labeled
+ as artifacts.
+ cube_width: int
+ The width of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_height: int
+ The height of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_depth: int
+ The depth of the data cube centered on the cell used for
+ classification. Defaults to `20`.
+ network_depth: str
+ The network depth to use during classification. Defaults to `"50"`.
+ skip_detection : bool
+ If selected, the detection step is skipped and instead we get the
+ detected cells from the cell layer below (from a previous
+ detection run or import).
+ skip_classification : bool
+ If selected, the classification step is skipped and all cells from
+ the detection stage are added.
+ detected_cells: Optional list of Cell objects.
+ If specified, the cells to use during classification.
+ detection_batch_size: int
+ The number of planes of the original data volume to process at
+ once. The GPU/CPU memory must be able to contain this many planes
+ for all the filters. For performance-critical applications, tune
+ to maximize memory usage without running out. Check your GPU/CPU
+ memory to verify it's not full.
+ torch_device : str, optional
+ The device on which to run the computation. If not specified (None),
+ "cuda" will be used if a GPU is available, otherwise "cpu".
+ You can also manually specify "cuda" or "cpu".
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows faster
+ GPU data speeds, but can only be used if the data used by the GPU can
+ stay in the CPU RAM while the GPU uses it. I.e. there's enough RAM.
+ Otherwise, if there's a risk of the RAM being paged, it shouldn't be
+ used. Defaults to False.
+ split_ball_xy_size: float
+ Similar to `ball_xy_size`, except the value to use for the 3d
+ filter during cluster splitting.
+ split_ball_z_size: float
+ Similar to `ball_z_size`, except the value to use for the 3d filter
+ during cluster splitting.
+ split_ball_overlap_fraction: float
+ Similar to `ball_overlap_fraction`, except the value to use for the
+ 3d filter during cluster splitting.
+ n_splitting_iter: int
+ The number of iterations to run the 3d filtering on a cluster. Each
+ iteration reduces the cluster size by the voxels not retained in
+ the previous iteration.
+ detect_callback : Callable[int], optional
+ Called every time a plane has finished being processed during the
+ detection stage. Called with the plane number that has finished.
+ classify_callback : Callable[int], optional
+ Called every time a point has finished being classified.
+ Called with the batch number that has just finished.
+ detect_finished_callback : Callable[list], optional
+ Called after detection is finished with the list of detected points.
+ """
+ from cellfinder.core.classify import classify
+ from cellfinder.core.detect import detect
+ from cellfinder.core.tools import prep
+
+ if not skip_detection:
+ logger.info("Detecting cell candidates")
+
+ points = detect.main(
+ signal_array,
+ start_plane,
+ end_plane,
+ voxel_sizes,
+ soma_diameter,
+ max_cluster_size,
+ ball_xy_size,
+ ball_z_size,
+ ball_overlap_fraction,
+ soma_spread_factor,
+ n_free_cpus,
+ log_sigma_size,
+ n_sds_above_mean_thresh,
+ batch_size=detection_batch_size,
+ torch_device=torch_device,
+ pin_memory=pin_memory,
+ callback=detect_callback,
+ split_ball_z_size=split_ball_z_size,
+ split_ball_xy_size=split_ball_xy_size,
+ split_ball_overlap_fraction=split_ball_overlap_fraction,
+ n_splitting_iter=n_splitting_iter,
+ )
+
+ if detect_finished_callback is not None:
+ detect_finished_callback(points)
+ else:
+ points = detected_cells or []  # if None
+ detect_finished_callback(points)
+
+ if not skip_classification:
+ install_path = None
+ model_weights = prep.prep_model_weights(
+ model_weights, install_path, model
+ )
+ if len(points) > 0:
+ logger.info("Running classification")
+ points = classify.main(
+ points,
+ signal_array,
+ background_array,
+ n_free_cpus,
+ voxel_sizes,
+ network_voxel_sizes,
+ classification_batch_size,
+ cube_height,
+ cube_width,
+ cube_depth,
+ trained_model,
+ model_weights,
+ network_depth,
+ callback=classify_callback,
+ )
+ else:
+ logger.info("No candidates, skipping classification")
+ return points
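
Taken together, the rewritten cellfinder.core.main.main exposes separate detection and classification batch sizes (in the removed 1.7.0 file at the bottom of this diff, detect.main received batch_size=classification_batch_size). A minimal usage sketch against the 1.8.0 signature; the arrays and parameter values are placeholders, and tiny random volumes will not yield meaningful detections:

    import numpy as np
    from cellfinder.core.main import main

    signal = np.random.random((30, 256, 256)).astype(np.float32)
    background = np.random.random((30, 256, 256)).astype(np.float32)

    cells = main(
        signal,
        background,
        voxel_sizes=(5.0, 2.0, 2.0),      # microns, (z, y, x)
        detection_batch_size=1,           # planes per detection batch
        classification_batch_size=32,     # cubes per classification batch
        torch_device="cpu",
        pin_memory=False,                 # only enable with plenty of free RAM
        n_splitting_iter=10,
        # First run downloads the default pretrained classification weights.
        detect_callback=lambda plane: None,
        classify_callback=lambda batch: None,
    )
    print(f"{len(cells)} cells after classification")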

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/curation.py
@@ -228,8 +228,8 @@ class CurationWidget(QWidget):
  self.layout.addWidget(self.load_data_panel, row, column, 1, 1)

  def setup_keybindings(self):
- self.viewer.bind_key("c", self.mark_as_cell)
- self.viewer.bind_key("x", self.mark_as_non_cell)
+ self.viewer.bind_key("c", self.mark_as_cell, overwrite=True)
+ self.viewer.bind_key("x", self.mark_as_non_cell, overwrite=True)

  def set_signal_image(self):
  """

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/detect.py
@@ -244,24 +244,26 @@ def detect_widget() -> FunctionGui:
  detection_options,
  skip_detection: bool,
  soma_diameter: float,
+ log_sigma_size: float,
+ n_sds_above_mean_thresh: float,
  ball_xy_size: float,
  ball_z_size: float,
  ball_overlap_fraction: float,
- log_sigma_size: float,
- n_sds_above_mean_thresh: int,
+ detection_batch_size: int,
  soma_spread_factor: float,
- max_cluster_size: int,
+ max_cluster_size: float,
  classification_options,
  skip_classification: bool,
  use_pre_trained_weights: bool,
  trained_model: Optional[Path],
- batch_size: int,
+ classification_batch_size: int,
  misc_options,
  start_plane: int,
  end_plane: int,
  n_free_cpus: int,
  analyse_local: bool,
  use_gpu: bool,
+ pin_memory: bool,
  debug: bool,
  reset_button,
  ) -> None:
@@ -271,43 +273,60 @@ def detect_widget() -> FunctionGui:
  Parameters
  ----------
  voxel_size_z : float
- Size of your voxels in the axial dimension
+ Size of your voxels in the axial dimension (microns)
  voxel_size_y : float
- Size of your voxels in the y direction (top to bottom)
+ Size of your voxels in the y direction (top to bottom) (microns)
  voxel_size_x : float
- Size of your voxels in the x direction (left to right)
+ Size of your voxels in the x direction (left to right) (microns)
  skip_detection : bool
  If selected, the detection step is skipped and instead we get the
  detected cells from the cell layer below (from a previous
  detection run or import)
  soma_diameter : float
- The expected in-plane soma diameter (microns)
+ The expected in-plane (xy) soma diameter (microns)
+ log_sigma_size : float
+ Gaussian filter width (as a fraction of soma diameter) used during
+ 2d in-plane Laplacian of Gaussian filtering
+ n_sds_above_mean_thresh : float
+ Intensity threshold (the number of standard deviations above
+ the mean) of the filtered 2d planes used to mark pixels as
+ foreground or background
  ball_xy_size : float
- Elliptical morphological in-plane filter size (microns)
+ 3d filter's in-plane (xy) filter ball size (microns)
  ball_z_size : float
- Elliptical morphological axial filter size (microns)
+ 3d filter's axial (z) filter ball size (microns)
  ball_overlap_fraction : float
- Fraction of the morphological filter needed to be filled
- to retain a voxel
- log_sigma_size : float
- Laplacian of Gaussian filter width (as a fraction of soma diameter)
- n_sds_above_mean_thresh : int
- Cell intensity threshold (as a multiple of noise above the mean)
+ 3d filter's fraction of the ball filter needed to be filled by
+ foreground voxels, centered on a voxel, to retain the voxel
+ detection_batch_size: int
+ The number of planes of the original data volume to process at
+ once. The GPU/CPU memory must be able to contain this many planes
+ for all the filters. For performance-critical applications, tune
+ to maximize memory usage without
+ running out. Check your GPU/CPU memory to verify it's not full
  soma_spread_factor : float
- Cell spread factor (for splitting up cell clusters)
- max_cluster_size : int
- Largest putative cell cluster (in cubic um) where splitting
- should be attempted
- use_pre_trained_weights : bool
- Select to use pre-trained model weights
- batch_size : int
- How many points to classify at one time
+ Cell spread factor for determining the largest cell volume before
+ splitting up cell clusters. Structures with spherical volume of
+ diameter `soma_spread_factor * soma_diameter` or less will not be
+ split
+ max_cluster_size : float
+ Largest detected cell cluster (in cubic um) where splitting
+ should be attempted. Clusters above this size will be labeled
+ as artifacts
  skip_classification : bool
  If selected, the classification step is skipped and all cells from
  the detection stage are added
+ use_pre_trained_weights : bool
+ Select to use pre-trained model weights
  trained_model : Optional[Path]
  Trained model file path (home directory (default) -> pretrained
  weights)
+ classification_batch_size : int
+ How many potential cells to classify at one time. The GPU/CPU
+ memory must be able to contain at once this many data cubes for
+ the models. For performance-critical applications, tune to
+ maximize memory usage without running
+ out. Check your GPU/CPU memory to verify it's not full
  start_plane : int
  First plane to process (to process a subset of the data)
  end_plane : int
@@ -318,6 +337,12 @@ def detect_widget() -> FunctionGui:
  Only analyse planes around the current position
  use_gpu : bool
  If True, use GPU for processing (if available); otherwise, use CPU.
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows
+ faster GPU data speeds, but can only be used if the data used by
+ the GPU can stay in the CPU RAM while the GPU uses it. I.e. there's
+ enough RAM. Otherwise, if there's a risk of the RAM being paged, it
+ shouldn't be used. Defaults to False.
  debug : bool
  Increase logging
  reset_button :
@@ -373,6 +398,7 @@ def detect_widget() -> FunctionGui:
  n_sds_above_mean_thresh,
  soma_spread_factor,
  max_cluster_size,
+ detection_batch_size,
  )

  if use_pre_trained_weights:
@@ -381,7 +407,7 @@ def detect_widget() -> FunctionGui:
  skip_classification,
  use_pre_trained_weights,
  trained_model,
- batch_size,
+ classification_batch_size,
  )

  if analyse_local:
@@ -392,7 +418,13 @@ def detect_widget() -> FunctionGui:
  end_plane = len(signal_image.data)

  misc_inputs = MiscInputs(
- start_plane, end_plane, n_free_cpus, analyse_local, use_gpu, debug
+ start_plane,
+ end_plane,
+ n_free_cpus,
+ analyse_local,
+ use_gpu,
+ pin_memory,
+ debug,
  )

  worker = Worker(

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder/napari/detect/detect_containers.py
@@ -68,9 +68,10 @@ class DetectionInputs(InputContainer):
  ball_z_size: float = 15
  ball_overlap_fraction: float = 0.6
  log_sigma_size: float = 0.2
- n_sds_above_mean_thresh: int = 10
+ n_sds_above_mean_thresh: float = 10
  soma_spread_factor: float = 1.4
- max_cluster_size: int = 100000
+ max_cluster_size: float = 100000
+ detection_batch_size: int = 1

  def as_core_arguments(self) -> dict:
  return super().as_core_arguments()
@@ -97,14 +98,17 @@ class DetectionInputs(InputContainer):
  "n_sds_above_mean_thresh", custom_label="Threshold"
  ),
  soma_spread_factor=cls._custom_widget(
- "soma_spread_factor", custom_label="Cell spread"
+ "soma_spread_factor", custom_label="Split cell spread"
  ),
  max_cluster_size=cls._custom_widget(
  "max_cluster_size",
- custom_label="Max cluster",
+ custom_label="Split max cluster",
  min=0,
  max=10000000,
  ),
+ detection_batch_size=cls._custom_widget(
+ "detection_batch_size", custom_label="Batch size (detection)"
+ ),
  )


@@ -115,7 +119,7 @@ class ClassificationInputs(InputContainer):
  skip_classification: bool = False
  use_pre_trained_weights: bool = True
  trained_model: Optional[Path] = Path.home()
- batch_size: int = 64
+ classification_batch_size: int = 64

  def as_core_arguments(self) -> dict:
  args = super().as_core_arguments()
@@ -133,7 +137,10 @@ class ClassificationInputs(InputContainer):
  skip_classification=dict(
  value=cls.defaults()["skip_classification"]
  ),
- batch_size=dict(value=cls.defaults()["batch_size"]),
+ classification_batch_size=dict(
+ value=cls.defaults()["classification_batch_size"],
+ label="Batch size (classification)",
+ ),
  )


@@ -146,6 +153,7 @@ class MiscInputs(InputContainer):
  n_free_cpus: int = 2
  analyse_local: bool = False
  use_gpu: bool = field(default_factory=lambda: torch.cuda.is_available())
+ pin_memory: bool = False
  debug: bool = False

  def as_core_arguments(self) -> dict:
@@ -172,5 +180,10 @@ class MiscInputs(InputContainer):
  value=cls.defaults()["use_gpu"],
  enabled=torch.cuda.is_available(),
  ),
+ pin_memory=dict(
+ widget_type="CheckBox",
+ label="Pin data to memory",
+ value=cls.defaults()["pin_memory"],
+ ),
  debug=dict(value=cls.defaults()["debug"]),
  )

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cellfinder
- Version: 1.7.0
+ Version: 1.8.0
  Summary: Automated 3D cell detection in large microscopy images
  Author-email: "Adam Tyson, Christian Niedworok, Charly Rousseau" <code@adamltyson.com>
  License: BSD-3-Clause
@@ -53,7 +53,7 @@ Requires-Dist: brainglobe-napari-io; extra == "napari"
  Requires-Dist: magicgui; extra == "napari"
  Requires-Dist: napari-ndtiffs; extra == "napari"
  Requires-Dist: napari-plugin-engine>=0.1.4; extra == "napari"
- Requires-Dist: napari[pyqt5]; extra == "napari"
+ Requires-Dist: napari[pyqt5]>=0.6.1; extra == "napari"
  Requires-Dist: pooch>=1; extra == "napari"
  Requires-Dist: qtpy; extra == "napari"
  Dynamic: license-file

{cellfinder-1.7.0 → cellfinder-1.8.0}/cellfinder.egg-info/requires.txt
@@ -30,6 +30,6 @@ brainglobe-napari-io
  magicgui
  napari-ndtiffs
  napari-plugin-engine>=0.1.4
- napari[pyqt5]
+ napari[pyqt5]>=0.6.1
  pooch>=1
  qtpy

{cellfinder-1.7.0 → cellfinder-1.8.0}/pyproject.toml
@@ -59,7 +59,7 @@ napari = [
  "magicgui",
  "napari-ndtiffs",
  "napari-plugin-engine >= 0.1.4",
- "napari[pyqt5]",
+ "napari[pyqt5]>=0.6.1",
  "pooch >= 1",
  "qtpy",
  ]

cellfinder-1.7.0/cellfinder/core/main.py
@@ -1,115 +0,0 @@
- import os
- from typing import Callable, List, Optional, Tuple
-
- import numpy as np
- from brainglobe_utils.cells.cells import Cell
-
- from cellfinder.core import logger
- from cellfinder.core.download.download import model_type
- from cellfinder.core.train.train_yaml import depth_type
-
-
- def main(
- signal_array: np.ndarray,
- background_array: np.ndarray,
- voxel_sizes: Tuple[int, int, int],
- start_plane: int = 0,
- end_plane: int = -1,
- trained_model: Optional[os.PathLike] = None,
- model_weights: Optional[os.PathLike] = None,
- model: model_type = "resnet50_tv",
- batch_size: int = 64,
- n_free_cpus: int = 2,
- network_voxel_sizes: Tuple[int, int, int] = (5, 1, 1),
- soma_diameter: int = 16,
- ball_xy_size: int = 6,
- ball_z_size: int = 15,
- ball_overlap_fraction: float = 0.6,
- log_sigma_size: float = 0.2,
- n_sds_above_mean_thresh: float = 10,
- soma_spread_factor: float = 1.4,
- max_cluster_size: int = 100000,
- cube_width: int = 50,
- cube_height: int = 50,
- cube_depth: int = 20,
- network_depth: depth_type = "50",
- skip_detection: bool = False,
- skip_classification: bool = False,
- detected_cells: List[Cell] = None,
- classification_batch_size: Optional[int] = None,
- torch_device: Optional[str] = None,
- *,
- detect_callback: Optional[Callable[[int], None]] = None,
- classify_callback: Optional[Callable[[int], None]] = None,
- detect_finished_callback: Optional[Callable[[list], None]] = None,
- ) -> List[Cell]:
- """
- Parameters
- ----------
- detect_callback : Callable[int], optional
- Called every time a plane has finished being processed during the
- detection stage. Called with the plane number that has finished.
- classify_callback : Callable[int], optional
- Called every time a point has finished being classified.
- Called with the batch number that has just finished.
- detect_finished_callback : Callable[list], optional
- Called after detection is finished with the list of detected points.
- """
- from cellfinder.core.classify import classify
- from cellfinder.core.detect import detect
- from cellfinder.core.tools import prep
-
- if not skip_detection:
- logger.info("Detecting cell candidates")
-
- points = detect.main(
- signal_array,
- start_plane,
- end_plane,
- voxel_sizes,
- soma_diameter,
- max_cluster_size,
- ball_xy_size,
- ball_z_size,
- ball_overlap_fraction,
- soma_spread_factor,
- n_free_cpus,
- log_sigma_size,
- n_sds_above_mean_thresh,
- batch_size=classification_batch_size,
- torch_device=torch_device,
- callback=detect_callback,
- )
-
- if detect_finished_callback is not None:
- detect_finished_callback(points)
- else:
- points = detected_cells or []  # if None
- detect_finished_callback(points)
-
- if not skip_classification:
- install_path = None
- model_weights = prep.prep_model_weights(
- model_weights, install_path, model
- )
- if len(points) > 0:
- logger.info("Running classification")
- points = classify.main(
- points,
- signal_array,
- background_array,
- n_free_cpus,
- voxel_sizes,
- network_voxel_sizes,
- batch_size,
- cube_height,
- cube_width,
- cube_depth,
- trained_model,
- model_weights,
- network_depth,
- callback=classify_callback,
- )
- else:
- logger.info("No candidates, skipping classification")
- return points