cellfinder 1.7.0.tar.gz → 1.9.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {cellfinder-1.7.0 → cellfinder-1.9.0}/.github/workflows/test_and_deploy.yml +28 -0
  2. {cellfinder-1.7.0 → cellfinder-1.9.0}/PKG-INFO +10 -8
  3. {cellfinder-1.7.0 → cellfinder-1.9.0}/README.md +7 -5
  4. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/classify.py +48 -2
  5. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/detect.py +81 -52
  6. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/plane/plane_filter.py +107 -10
  7. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/setup_filters.py +51 -12
  8. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/volume/ball_filter.py +5 -5
  9. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/volume/structure_detection.py +5 -0
  10. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/volume/structure_splitting.py +3 -2
  11. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/volume/volume_filter.py +1 -1
  12. cellfinder-1.9.0/cellfinder/core/main.py +247 -0
  13. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/tools.py +1 -1
  14. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/train/train_yaml.py +2 -2
  15. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/curation.py +54 -19
  16. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/detect/detect.py +84 -28
  17. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/detect/detect_containers.py +29 -7
  18. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/detect/thread_worker.py +26 -16
  19. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/utils.py +6 -1
  20. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/PKG-INFO +10 -8
  21. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/requires.txt +2 -2
  22. {cellfinder-1.7.0 → cellfinder-1.9.0}/pyproject.toml +6 -2
  23. cellfinder-1.7.0/cellfinder/core/main.py +0 -115
  24. {cellfinder-1.7.0 → cellfinder-1.9.0}/.github/workflows/test_include_guard.yaml +0 -0
  25. {cellfinder-1.7.0 → cellfinder-1.9.0}/.gitignore +0 -0
  26. {cellfinder-1.7.0 → cellfinder-1.9.0}/.napari/config.yml +0 -0
  27. {cellfinder-1.7.0 → cellfinder-1.9.0}/CITATION.cff +0 -0
  28. {cellfinder-1.7.0 → cellfinder-1.9.0}/LICENSE +0 -0
  29. {cellfinder-1.7.0 → cellfinder-1.9.0}/MANIFEST.in +0 -0
  30. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/__init__.py +0 -0
  31. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/cli_migration_warning.py +0 -0
  32. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/__init__.py +0 -0
  33. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/__init__.py +0 -0
  34. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/augment.py +0 -0
  35. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/cube_generator.py +0 -0
  36. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/resnet.py +0 -0
  37. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/classify/tools.py +0 -0
  38. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/config/__init__.py +0 -0
  39. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/config/cellfinder.conf +0 -0
  40. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/__init__.py +0 -0
  41. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/__init__.py +0 -0
  42. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/plane/__init__.py +0 -0
  43. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/plane/classical_filter.py +0 -0
  44. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/plane/tile_walker.py +0 -0
  45. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/detect/filters/volume/__init__.py +0 -0
  46. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/download/__init__.py +0 -0
  47. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/download/cli.py +0 -0
  48. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/download/download.py +0 -0
  49. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/IO.py +0 -0
  50. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/__init__.py +0 -0
  51. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/array_operations.py +0 -0
  52. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/geometry.py +0 -0
  53. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/image_processing.py +0 -0
  54. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/prep.py +0 -0
  55. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/source_files.py +0 -0
  56. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/system.py +0 -0
  57. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/threading.py +0 -0
  58. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/tools/tiff.py +0 -0
  59. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/train/__init__.py +0 -0
  60. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/core/types.py +0 -0
  61. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/__init__.py +0 -0
  62. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/detect/__init__.py +0 -0
  63. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/input_container.py +0 -0
  64. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/napari.yaml +0 -0
  65. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/sample_data.py +0 -0
  66. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/train/__init__.py +0 -0
  67. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/train/train.py +0 -0
  68. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder/napari/train/train_containers.py +0 -0
  69. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/SOURCES.txt +0 -0
  70. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/dependency_links.txt +0 -0
  71. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/entry_points.txt +0 -0
  72. {cellfinder-1.7.0 → cellfinder-1.9.0}/cellfinder.egg-info/top_level.txt +0 -0
  73. {cellfinder-1.7.0 → cellfinder-1.9.0}/setup.cfg +0 -0
@@ -59,6 +59,16 @@ jobs:
  python-version: "3.13"

  steps:
+ # Free up disk space on ubuntu runners
+ - name: Free Disk Space
+ if: matrix.os == 'ubuntu-latest'
+ uses: endersonmenezes/free-disk-space@6c4664f43348c8c7011b53488d5ca65e9fc5cd1a # v3
+ with:
+ remove_android: true
+ remove_dotnet: true
+ rm_cmd: 'rmz'
+ rmz_version: '3.1.1'
+
  - uses: actions/checkout@v4
  - name: Cache pooch data
  uses: actions/cache@v4
@@ -116,6 +126,15 @@ jobs:
  BRAINGLOBE_TEST_DATA_DIR: "~/.pooch_cache"

  steps:
+ # Free up disk space on ubuntu runners
+ - name: Free Disk Space
+ uses: endersonmenezes/free-disk-space@6c4664f43348c8c7011b53488d5ca65e9fc5cd1a # v3
+ with:
+ remove_android: true
+ remove_dotnet: true
+ rm_cmd: 'rmz'
+ rmz_version: '3.1.1'
+
  - uses: actions/checkout@v4
  - name: Cache brainglobe directory
  uses: actions/cache@v3
@@ -163,6 +182,15 @@ jobs:
  KERAS_BACKEND: torch
  CELLFINDER_TEST_DEVICE: cpu
  steps:
+ # Free up disk space on ubuntu runners
+ - name: Free Disk Space
+ uses: endersonmenezes/free-disk-space@6c4664f43348c8c7011b53488d5ca65e9fc5cd1a # v3
+ with:
+ remove_android: true
+ remove_dotnet: true
+ rm_cmd: 'rmz'
+ rmz_version: '3.1.1'
+
  - name: Cache brainglobe directory
  uses: actions/cache@v3
  with:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cellfinder
- Version: 1.7.0
+ Version: 1.9.0
  Summary: Automated 3D cell detection in large microscopy images
  Author-email: "Adam Tyson, Christian Niedworok, Charly Rousseau" <code@adamltyson.com>
  License: BSD-3-Clause
@@ -26,7 +26,7 @@ License-File: LICENSE
  Requires-Dist: brainglobe-utils>=0.5.0
  Requires-Dist: brainglobe-napari-io>=0.3.4
  Requires-Dist: dask[array]
- Requires-Dist: fancylog>=0.0.7
+ Requires-Dist: fancylog>=0.6.0
  Requires-Dist: natsort
  Requires-Dist: numba
  Requires-Dist: numpy
@@ -53,24 +53,26 @@ Requires-Dist: brainglobe-napari-io; extra == "napari"
  Requires-Dist: magicgui; extra == "napari"
  Requires-Dist: napari-ndtiffs; extra == "napari"
  Requires-Dist: napari-plugin-engine>=0.1.4; extra == "napari"
- Requires-Dist: napari[pyqt5]; extra == "napari"
+ Requires-Dist: napari[pyqt5]>=0.6.5; extra == "napari"
  Requires-Dist: pooch>=1; extra == "napari"
  Requires-Dist: qtpy; extra == "napari"
  Dynamic: license-file

  [![Python Version](https://img.shields.io/pypi/pyversions/cellfinder.svg)](https://pypi.org/project/cellfinder)
  [![PyPI](https://img.shields.io/pypi/v/cellfinder.svg)](https://pypi.org/project/cellfinder)
- [![Downloads](https://pepy.tech/badge/cellfinder)](https://pepy.tech/project/cellfinder)
+ [![Anaconda version](https://anaconda.org/conda-forge/cellfinder/badges/version.svg)](https://anaconda.org/conda-forge/cellfinder)
+ [![Napari hub](https://img.shields.io/endpoint?url=https://npe2api-git-add-shields-napari.vercel.app/api/shields/cellfinder)](https://napari-hub.org/plugins/cellfinder.html)
+ [![PyPI Downloads](https://pepy.tech/badge/cellfinder)](https://pepy.tech/project/cellfinder)
  [![Wheel](https://img.shields.io/pypi/wheel/cellfinder.svg)](https://pypi.org/project/cellfinder)
  [![Development Status](https://img.shields.io/pypi/status/cellfinder.svg)](https://github.com/brainglobe/cellfinder)
  [![Tests](https://img.shields.io/github/actions/workflow/status/brainglobe/cellfinder/test_and_deploy.yml?branch=main)](https://github.com/brainglobe/cellfinder/actions)
  [![codecov](https://codecov.io/gh/brainglobe/cellfinder/branch/main/graph/badge.svg?token=nx1lhNI7ox)](https://codecov.io/gh/brainglobe/cellfinder)
- [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
- [![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
+ [![Code style: Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/format.json)](https://github.com/astral-sh/ruff)[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
  [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/pre-commit/pre-commit)
  [![Contributions](https://img.shields.io/badge/Contributions-Welcome-brightgreen.svg)](https://brainglobe.info/community/developers/index.html)
- [![Twitter](https://img.shields.io/twitter/follow/brain_globe?style=social)](https://twitter.com/brain_globe)
-
+ [![image.sc forum](https://img.shields.io/badge/dynamic/json.svg?label=forum&url=https%3A%2F%2Fforum.image.sc%2Ftags%2Fbrainglobe.json&query=%24.topic_list.tags.0.topic_count&colorB=brightgreen&suffix=%20topics&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABPklEQVR42m3SyyqFURTA8Y2BER0TDyExZ+aSPIKUlPIITFzKeQWXwhBlQrmFgUzMMFLKZeguBu5y+//17dP3nc5vuPdee6299gohUYYaDGOyyACq4JmQVoFujOMR77hNfOAGM+hBOQqB9TjHD36xhAa04RCuuXeKOvwHVWIKL9jCK2bRiV284QgL8MwEjAneeo9VNOEaBhzALGtoRy02cIcWhE34jj5YxgW+E5Z4iTPkMYpPLCNY3hdOYEfNbKYdmNngZ1jyEzw7h7AIb3fRTQ95OAZ6yQpGYHMMtOTgouktYwxuXsHgWLLl+4x++Kx1FJrjLTagA77bTPvYgw1rRqY56e+w7GNYsqX6JfPwi7aR+Y5SA+BXtKIRfkfJAYgj14tpOF6+I46c4/cAM3UhM3JxyKsxiOIhH0IO6SH/A1Kb1WBeUjbkAAAAAElFTkSuQmCC)](https://forum.image.sc/tag/brainglobe)
+ [![Bluesky](https://img.shields.io/badge/Bluesky-0285FF?logo=bluesky&logoColor=fff)](https://bsky.app/profile/brainglobe.info)
+ [![Mastodon](https://img.shields.io/badge/Mastodon-6364FF?logo=mastodon&logoColor=fff)](https://mastodon.online/@brainglobe)
  # cellfinder

  cellfinder is software for automated 3D cell detection in very large 3D images (e.g., serial two-photon or lightsheet volumes of whole mouse brains).
@@ -1,16 +1,18 @@
  [![Python Version](https://img.shields.io/pypi/pyversions/cellfinder.svg)](https://pypi.org/project/cellfinder)
  [![PyPI](https://img.shields.io/pypi/v/cellfinder.svg)](https://pypi.org/project/cellfinder)
- [![Downloads](https://pepy.tech/badge/cellfinder)](https://pepy.tech/project/cellfinder)
+ [![Anaconda version](https://anaconda.org/conda-forge/cellfinder/badges/version.svg)](https://anaconda.org/conda-forge/cellfinder)
+ [![Napari hub](https://img.shields.io/endpoint?url=https://npe2api-git-add-shields-napari.vercel.app/api/shields/cellfinder)](https://napari-hub.org/plugins/cellfinder.html)
+ [![PyPI Downloads](https://pepy.tech/badge/cellfinder)](https://pepy.tech/project/cellfinder)
  [![Wheel](https://img.shields.io/pypi/wheel/cellfinder.svg)](https://pypi.org/project/cellfinder)
  [![Development Status](https://img.shields.io/pypi/status/cellfinder.svg)](https://github.com/brainglobe/cellfinder)
  [![Tests](https://img.shields.io/github/actions/workflow/status/brainglobe/cellfinder/test_and_deploy.yml?branch=main)](https://github.com/brainglobe/cellfinder/actions)
  [![codecov](https://codecov.io/gh/brainglobe/cellfinder/branch/main/graph/badge.svg?token=nx1lhNI7ox)](https://codecov.io/gh/brainglobe/cellfinder)
- [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
- [![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
+ [![Code style: Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/format.json)](https://github.com/astral-sh/ruff)[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
  [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/pre-commit/pre-commit)
  [![Contributions](https://img.shields.io/badge/Contributions-Welcome-brightgreen.svg)](https://brainglobe.info/community/developers/index.html)
- [![Twitter](https://img.shields.io/twitter/follow/brain_globe?style=social)](https://twitter.com/brain_globe)
-
+ [![image.sc forum](https://img.shields.io/badge/dynamic/json.svg?label=forum&url=https%3A%2F%2Fforum.image.sc%2Ftags%2Fbrainglobe.json&query=%24.topic_list.tags.0.topic_count&colorB=brightgreen&suffix=%20topics&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABPklEQVR42m3SyyqFURTA8Y2BER0TDyExZ+aSPIKUlPIITFzKeQWXwhBlQrmFgUzMMFLKZeguBu5y+//17dP3nc5vuPdee6299gohUYYaDGOyyACq4JmQVoFujOMR77hNfOAGM+hBOQqB9TjHD36xhAa04RCuuXeKOvwHVWIKL9jCK2bRiV284QgL8MwEjAneeo9VNOEaBhzALGtoRy02cIcWhE34jj5YxgW+E5Z4iTPkMYpPLCNY3hdOYEfNbKYdmNngZ1jyEzw7h7AIb3fRTQ95OAZ6yQpGYHMMtOTgouktYwxuXsHgWLLl+4x++Kx1FJrjLTagA77bTPvYgw1rRqY56e+w7GNYsqX6JfPwi7aR+Y5SA+BXtKIRfkfJAYgj14tpOF6+I46c4/cAM3UhM3JxyKsxiOIhH0IO6SH/A1Kb1WBeUjbkAAAAAElFTkSuQmCC)](https://forum.image.sc/tag/brainglobe)
+ [![Bluesky](https://img.shields.io/badge/Bluesky-0285FF?logo=bluesky&logoColor=fff)](https://bsky.app/profile/brainglobe.info)
+ [![Mastodon](https://img.shields.io/badge/Mastodon-6364FF?logo=mastodon&logoColor=fff)](https://mastodon.online/@brainglobe)
  # cellfinder

  cellfinder is software for automated 3D cell detection in very large 3D images (e.g., serial two-photon or lightsheet volumes of whole mouse brains).
@@ -19,8 +19,8 @@ def main(
  signal_array: types.array,
  background_array: types.array,
  n_free_cpus: int,
- voxel_sizes: Tuple[int, int, int],
- network_voxel_sizes: Tuple[int, int, int],
+ voxel_sizes: Tuple[float, float, float],
+ network_voxel_sizes: Tuple[float, float, float],
  batch_size: int,
  cube_height: int,
  cube_width: int,
@@ -29,12 +29,58 @@ def main(
  model_weights: Optional[os.PathLike],
  network_depth: depth_type,
  max_workers: int = 3,
+ pin_memory: bool = False,
  *,
  callback: Optional[Callable[[int], None]] = None,
  ) -> List[Cell]:
  """
  Parameters
  ----------
+
+ points: List of Cell objects
+ The potential cells to classify.
+ signal_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ background_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
+ n_free_cpus : int
+ How many CPU cores to leave free.
+ voxel_sizes : 3-tuple of floats
+ Size of your voxels in the z, y, and x dimensions.
+ network_voxel_sizes : 3-tuple of floats
+ Size of the pre-trained network's voxels in the z, y, and x dimensions.
+ batch_size : int
+ How many potential cells to classify at one time. The GPU/CPU
+ memory must be able to contain at once this many data cubes for
+ the models. For performance-critical applications, tune to maximize
+ memory usage without running out. Check your GPU/CPU memory to verify
+ it's not full.
+ cube_height: int
+ The height of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_width: int
+ The width of the data cube centered on the cell used for
+ classification. Defaults to `50`.
+ cube_depth: int
+ The depth of the data cube centered on the cell used for
+ classification. Defaults to `20`.
+ trained_model : Optional[Path]
+ Trained model file path (home directory (default) -> pretrained
+ weights).
+ model_weights : Optional[Path]
+ Model weights path (home directory (default) -> pretrained
+ weights).
+ network_depth: str
+ The network depth to use during classification. Defaults to `"50"`.
+ max_workers: int
+ The number of sub-processes to use for data loading / processing.
+ Defaults to 8.
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows faster
+ GPU data speeds, but can only be used if the data used by the GPU can
+ stay in the CPU RAM while the GPU uses it. I.e. there's enough RAM.
+ Otherwise, if there's a risk of the RAM being paged, it shouldn't be
+ used. Defaults to False.
  callback : Callable[int], optional
  A callback function that is called during classification. Called with
  the batch number once that batch has been classified.
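For orientation, the new `pin_memory` flag sits alongside the existing classification arguments documented above. Below is a minimal sketch of a call to `cellfinder.core.classify.classify.main`; the keyword names are taken from the docstring in this hunk, but the full signature is not shown in this diff, so the argument set and all example values are assumptions rather than documented API.

```python
import numpy as np
from cellfinder.core.classify import classify

# Dummy signal/background volumes (z, y, x); real data would usually be a
# lazily loaded dask array over a tiff stack.
signal = np.random.randint(0, 2**16, (30, 256, 256), dtype=np.uint16)
background = np.random.randint(0, 2**16, (30, 256, 256), dtype=np.uint16)

candidates = []  # List[Cell] of detection candidates; empty here for brevity

classified = classify.main(
    points=candidates,
    signal_array=signal,
    background_array=background,
    n_free_cpus=2,
    voxel_sizes=(5.0, 2.0, 2.0),          # z, y, x (microns)
    network_voxel_sizes=(5.0, 1.0, 1.0),
    batch_size=64,
    cube_height=50,
    cube_width=50,
    cube_depth=20,
    trained_model=None,                   # None -> pretrained weights
    model_weights=None,
    network_depth="50",
    max_workers=3,
    pin_memory=True,                      # only when the cubes fit in CPU RAM
)
```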
@@ -49,10 +49,13 @@ def main(
  plane_directory: Optional[str] = None,
  batch_size: Optional[int] = None,
  torch_device: Optional[str] = None,
- split_ball_xy_size: int = 3,
- split_ball_z_size: int = 3,
+ pin_memory: bool = False,
+ split_ball_xy_size: float = 6,
+ split_ball_z_size: float = 15,
  split_ball_overlap_fraction: float = 0.8,
- split_soma_diameter: int = 7,
+ n_splitting_iter: int = 10,
+ n_sds_above_mean_tiled_thresh: float = 10,
+ tiled_thresh_tile_size: float | None = None,
  *,
  callback: Optional[Callable[[int], None]] = None,
  ) -> List[Cell]:
@@ -61,69 +64,94 @@ def main(

  Parameters
  ----------
- signal_array : numpy.ndarray
- 3D array representing the signal data.
-
+ signal_array : numpy.ndarray or dask array
+ 3D array representing the signal data in z, y, x order.
  start_plane : int
- Index of the starting plane for detection.
-
+ First plane index to process (inclusive, to process a subset of the
+ data).
  end_plane : int
- Index of the ending plane for detection.
-
- voxel_sizes : Tuple[float, float, float]
- Tuple of voxel sizes in each dimension (z, y, x).
-
+ Last plane index to process (exclusive, to process a subset of the
+ data).
+ voxel_sizes : 3-tuple of floats
+ Size of your voxels in the z, y, and x dimensions (microns).
  soma_diameter : float
- Diameter of the soma in physical units.
-
+ The expected in-plane (xy) soma diameter (microns).
  max_cluster_size : float
- Maximum size of a cluster in physical units.
-
+ Largest detected cell cluster (in cubic um) where splitting
+ should be attempted. Clusters above this size will be labeled
+ as artifacts.
  ball_xy_size : float
- Size of the XY ball used for filtering in physical units.
-
+ 3d filter's in-plane (xy) filter ball size (microns).
  ball_z_size : float
- Size of the Z ball used for filtering in physical units.
-
+ 3d filter's axial (z) filter ball size (microns).
  ball_overlap_fraction : float
- Fraction of overlap allowed between balls.
-
+ 3d filter's fraction of the ball filter needed to be filled by
+ foreground voxels, centered on a voxel, to retain the voxel.
  soma_spread_factor : float
- Spread factor for soma size.
-
+ Cell spread factor for determining the largest cell volume before
+ splitting up cell clusters. Structures with spherical volume of
+ diameter `soma_spread_factor * soma_diameter` or less will not be
+ split.
  n_free_cpus : int
- Number of free CPU cores available for parallel processing.
-
+ How many CPU cores to leave free.
  log_sigma_size : float
- Size of the sigma for the log filter.
-
+ Gaussian filter width (as a fraction of soma diameter) used during
+ 2d in-plane Laplacian of Gaussian filtering.
  n_sds_above_mean_thresh : float
- Number of standard deviations above the mean threshold.
-
+ Per-plane intensity threshold (the number of standard deviations
+ above the mean) of the filtered 2d planes used to mark pixels as
+ foreground or background.
  outlier_keep : bool, optional
  Whether to keep outliers during detection. Defaults to False.
-
  artifact_keep : bool, optional
  Whether to keep artifacts during detection. Defaults to False.
-
  save_planes : bool, optional
  Whether to save the planes during detection. Defaults to False.
-
  plane_directory : str, optional
  Directory path to save the planes. Defaults to None.
-
- batch_size : int, optional
- The number of planes to process in each batch. Defaults to 1.
- For CPU, there's no benefit for a larger batch size. Only a memory
- usage increase. For CUDA, the larger the batch size the better the
- performance. Until it fills up the GPU memory - after which it
- becomes slower.
-
+ batch_size: int
+ The number of planes of the original data volume to process at
+ once. The GPU/CPU memory must be able to contain this many planes
+ for all the filters. For performance-critical applications, tune to
+ maximize memory usage without running out. Check your GPU/CPU memory
+ to verify it's not full.
  torch_device : str, optional
  The device on which to run the computation. If not specified (None),
  "cuda" will be used if a GPU is available, otherwise "cpu".
  You can also manually specify "cuda" or "cpu".
-
+ pin_memory: bool
+ Pins data to be sent to the GPU to the CPU memory. This allows faster
+ GPU data speeds, but can only be used if the data used by the GPU can
+ stay in the CPU RAM while the GPU uses it. I.e. there's enough RAM.
+ Otherwise, if there's a risk of the RAM being paged, it shouldn't be
+ used. Defaults to False.
+ split_ball_xy_size: float
+ Similar to `ball_xy_size`, except the value to use for the 3d
+ filter during cluster splitting.
+ split_ball_z_size: float
+ Similar to `ball_z_size`, except the value to use for the 3d filter
+ during cluster splitting.
+ split_ball_overlap_fraction: float
+ Similar to `ball_overlap_fraction`, except the value to use for the
+ 3d filter during cluster splitting.
+ n_splitting_iter: int
+ The number of iterations to run the 3d filtering on a cluster. Each
+ iteration reduces the cluster size by the voxels not retained in
+ the previous iteration.
+ n_sds_above_mean_tiled_thresh : float
+ Per-plane, per-tile intensity threshold (the number of standard
+ deviations above the mean) for the filtered 2d planes used to mark
+ pixels as foreground or background. When used, (tile size is not zero)
+ a pixel is marked as foreground if its intensity is above both the
+ per-plane and per-tile threshold. I.e. it's above the set number of
+ standard deviations of the per-plane average and of the per-plane
+ per-tile average for the tile that contains it.
+ tiled_thresh_tile_size : float
+ The tile size used to tile the x, y plane to calculate the local
+ average intensity for the tiled threshold. The value is multiplied
+ by soma diameter (i.e. 1 means one soma diameter). If zero or None, the
+ tiled threshold is disabled and only the per-plane threshold is used.
+ Tiling is done with 50% overlap when striding.
  callback : Callable[int], optional
  A callback function that is called every time a plane has finished
  being processed. Called with the plane number that has finished.
@@ -131,7 +159,7 @@ def main(
  Returns
  -------
  List[Cell]
- List of detected cells.
+ List of detected cell candidates.
  """
  start_time = datetime.now()
  if torch_device is None:
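To show how the new splitting and tiled-threshold parameters described above fit together, here is a minimal sketch of a call to `cellfinder.core.detect.detect.main`. Keyword names follow the docstring in this hunk; any arguments not shown in the diff, and all example values, are assumptions meant only to illustrate the new options.

```python
import numpy as np
from cellfinder.core.detect import detect

# Dummy signal volume in z, y, x order (microns per voxel given below).
signal = np.random.randint(0, 2**16, (60, 512, 512), dtype=np.uint16)

cell_candidates = detect.main(
    signal_array=signal,
    start_plane=0,
    end_plane=60,                       # exclusive
    voxel_sizes=(5.0, 2.0, 2.0),        # z, y, x (microns)
    soma_diameter=16.0,                 # expected in-plane soma diameter (um)
    max_cluster_size=100_000.0,         # um^3; larger clusters become artifacts
    ball_xy_size=6.0,
    ball_z_size=15.0,
    ball_overlap_fraction=0.6,
    soma_spread_factor=1.4,
    n_free_cpus=2,
    log_sigma_size=0.2,
    n_sds_above_mean_thresh=10.0,
    batch_size=4,
    torch_device=None,                  # auto-select "cuda" if available
    pin_memory=False,
    # New in 1.9.0: local tiled threshold, tiles ~2 soma diameters wide
    n_sds_above_mean_tiled_thresh=10.0,
    tiled_thresh_tile_size=2.0,         # None or 0 disables the tiled threshold
)
```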
@@ -181,25 +209,24 @@ def main(
  ball_overlap_fraction=ball_overlap_fraction,
  log_sigma_size=log_sigma_size,
  n_sds_above_mean_thresh=n_sds_above_mean_thresh,
+ n_sds_above_mean_tiled_thresh=n_sds_above_mean_tiled_thresh,
+ tiled_thresh_tile_size=tiled_thresh_tile_size,
  outlier_keep=outlier_keep,
  artifact_keep=artifact_keep,
  save_planes=save_planes,
  plane_directory=plane_directory,
  batch_size=batch_size,
  torch_device=torch_device,
+ pin_memory=pin_memory,
+ n_splitting_iter=n_splitting_iter,
  )

  # replicate the settings specific to splitting, before we access anything
  # of the original settings, causing cached properties
  kwargs = dataclasses.asdict(settings)
- kwargs["ball_z_size_um"] = split_ball_z_size * settings.z_pixel_size
- kwargs["ball_xy_size_um"] = (
- split_ball_xy_size * settings.in_plane_pixel_size
- )
+ kwargs["ball_z_size_um"] = split_ball_z_size
+ kwargs["ball_xy_size_um"] = split_ball_xy_size
  kwargs["ball_overlap_fraction"] = split_ball_overlap_fraction
- kwargs["soma_diameter_um"] = (
- split_soma_diameter * settings.in_plane_pixel_size
- )
  # always run on cpu because copying to gpu overhead is likely slower than
  # any benefit for detection on smallish volumes
  kwargs["torch_device"] = "cpu"
@@ -219,7 +246,9 @@ def main(
  plane_shape=settings.plane_shape,
  clipping_value=settings.clipping_value,
  threshold_value=settings.threshold_value,
- n_sds_above_mean_thresh=n_sds_above_mean_thresh,
+ n_sds_above_mean_thresh=settings.n_sds_above_mean_thresh,
+ n_sds_above_mean_tiled_thresh=settings.n_sds_above_mean_tiled_thresh,
+ tiled_thresh_tile_size=settings.tiled_thresh_tile_size,
  log_sigma_size=log_sigma_size,
  soma_diameter=settings.soma_diameter,
  torch_device=torch_device,
@@ -1,13 +1,12 @@
- from dataclasses import dataclass, field
  from typing import Tuple

  import torch
+ import torch.nn.functional as F

  from cellfinder.core.detect.filters.plane.classical_filter import PeakEnhancer
  from cellfinder.core.detect.filters.plane.tile_walker import TileWalker


- @dataclass
  class TileProcessor:
  """
  Processor that filters each plane to highlight the peaks and also
@@ -39,7 +38,7 @@ class TileProcessor:
  Number of standard deviations above the mean threshold to use for
  determining whether a voxel is bright.
  log_sigma_size : float
- Size of the sigma for the gaussian filter.
+ Size of the Gaussian sigma for the Laplacian of Gaussian filtering.
  soma_diameter : float
  Diameter of the soma in voxels.
  torch_device: str
@@ -63,12 +62,22 @@ class TileProcessor:
  # voxels who are this many std above mean or more are set to
  # threshold_value
  n_sds_above_mean_thresh: float
+ # If used, voxels who are this many or more std above mean of the
+ # containing tile as well as above n_sds_above_mean_thresh for the plane
+ # average are set to threshold_value.
+ n_sds_above_mean_tiled_thresh: float
+ # the tile size, in pixels, that will be used to tile the x, y plane when
+ # we calculate the per-tile mean / std for use with
+ # n_sds_above_mean_tiled_thresh. We use 50% overlap when tiling.
+ local_threshold_tile_size_px: int = 0
+ # the torch device name
+ torch_device: str = ""

  # filter that finds the peaks in the planes
- peak_enhancer: PeakEnhancer = field(init=False)
+ peak_enhancer: PeakEnhancer = None
  # generates tiles of the planes, with each tile marked as being inside
  # or outside the brain based on brightness
- tile_walker: TileWalker = field(init=False)
+ tile_walker: TileWalker = None

  def __init__(
  self,
@@ -76,6 +85,8 @@ class TileProcessor:
  clipping_value: int,
  threshold_value: int,
  n_sds_above_mean_thresh: float,
+ n_sds_above_mean_tiled_thresh: float,
+ tiled_thresh_tile_size: float | None,
  log_sigma_size: float,
  soma_diameter: int,
  torch_device: str,
@@ -85,6 +96,12 @@ class TileProcessor:
  self.clipping_value = clipping_value
  self.threshold_value = threshold_value
  self.n_sds_above_mean_thresh = n_sds_above_mean_thresh
+ self.n_sds_above_mean_tiled_thresh = n_sds_above_mean_tiled_thresh
+ if tiled_thresh_tile_size:
+ self.local_threshold_tile_size_px = int(
+ round(soma_diameter * tiled_thresh_tile_size)
+ )
+ self.torch_device = torch_device

  laplace_gaussian_sigma = log_sigma_size * soma_diameter
  self.peak_enhancer = PeakEnhancer(
@@ -131,7 +148,10 @@ class TileProcessor:
  planes,
  enhanced_planes,
  self.n_sds_above_mean_thresh,
+ self.n_sds_above_mean_tiled_thresh,
+ self.local_threshold_tile_size_px,
  self.threshold_value,
+ self.torch_device,
  )

  return planes, inside_brain_tiles
@@ -145,21 +165,98 @@ def _threshold_planes(
  planes: torch.Tensor,
  enhanced_planes: torch.Tensor,
  n_sds_above_mean_thresh: float,
+ n_sds_above_mean_tiled_thresh: float,
+ local_threshold_tile_size_px: int,
  threshold_value: int,
+ torch_device: str,
  ) -> None:
  """
  Sets each plane (in-place) to threshold_value, where the corresponding
  enhanced_plane > mean + n_sds_above_mean_thresh*std. Each plane will be
  set to zero elsewhere.
  """
- planes_1d = enhanced_planes.view(enhanced_planes.shape[0], -1)
+ z, y, x = enhanced_planes.shape

+ # ---- get per-plane global threshold ----
+ planes_1d = enhanced_planes.view(z, -1)
  # add back last dim
- avg = torch.mean(planes_1d, dim=1, keepdim=True).unsqueeze(2)
- sd = torch.std(planes_1d, dim=1, keepdim=True).unsqueeze(2)
- threshold = avg + n_sds_above_mean_thresh * sd
+ std, mean = torch.std_mean(planes_1d, dim=1, keepdim=True)
+ threshold = mean.unsqueeze(2) + n_sds_above_mean_thresh * std.unsqueeze(2)
+ above_global = enhanced_planes > threshold
+
+ # ---- calculate the local tiled threshold ----
+ # we do 50% overlap so there's no jumps at boundaries
+ stride = local_threshold_tile_size_px // 2
+ # make tile even for ease of computation
+ tile_size = stride * 2
+ # Due to 50% overlap, to get tiles we move the tile by half tile (stride).
+ # Total moves will be y // stride - 2 (we start already with mask on first
+ # tile). So add back 1 for the first tile. Partial tiles are dropped
+ n_y_tiles = max(y // stride - 1, 1) if stride else 1
+ n_x_tiles = max(x // stride - 1, 1) if stride else 1
+ do_tile_y = n_y_tiles >= 2
+ do_tile_x = n_x_tiles >= 2
+ # we want at least one axis to have at least two tiles
+ if local_threshold_tile_size_px >= 2 and (do_tile_y or do_tile_x):
+ # num edge pixels dropped b/c moving by stride would move tile off edge
+ y_rem = y % stride
+ x_rem = x % stride
+ enhanced_planes_raw = enhanced_planes
+ if do_tile_y:
+ enhanced_planes = enhanced_planes[:, y_rem // 2 :, :]
+ if do_tile_x:
+ enhanced_planes = enhanced_planes[:, :, x_rem // 2 :]
+
+ # add empty channel dim after z "batch" dim -> zcyx
+ enhanced_planes = enhanced_planes.unsqueeze(1)
+ # unfold makes it 3 dim, z, M, L. L is number of tiles, M is tile area
+ unfolded = F.unfold(
+ enhanced_planes,
+ (tile_size if do_tile_y else y, tile_size if do_tile_x else x),
+ stride=stride,
+ )
+ # average the tile areas, for each tile
+ std, mean = torch.std_mean(unfolded, dim=1, keepdim=True)
+ threshold = mean + n_sds_above_mean_tiled_thresh * std
+
+ # reshape it back into Y by X tiles, instead of YX being one dim
+ threshold = threshold.reshape((z, n_y_tiles, n_x_tiles))
+
+ # we need total size of n_tiles * stride + stride + rem for the
+ # original size. So we add 2 strides and then chop off the excess above
+ # rem. We center it because of 50% overlap, the first tile is actually
+ # centered in between the first two strides
+ offsets = [(0, y), (0, x)]
+ for dim, do_tile, n_tiles, n, rem in [
+ (1, do_tile_y, n_y_tiles, y, y_rem),
+ (2, do_tile_x, n_x_tiles, x, x_rem),
+ ]:
+ if do_tile:
+ repeats = (
+ torch.ones(n_tiles, dtype=torch.int, device=torch_device)
+ * stride
+ )
+ # add total of 2 additional strides
+ repeats[0] = 2 * stride
+ repeats[-1] = 2 * stride
+ output_size = (n_tiles + 2) * stride
+
+ threshold = threshold.repeat_interleave(
+ repeats, dim=dim, output_size=output_size
+ )
+ # drop the excess we gained from padding rem to whole stride
+ offset = (stride - rem) // 2
+ offsets[dim - 1] = offset, n + offset
+
+ # can't use slice(...) objects in jit code so use actual indices
+ (a, b), (c, d) = offsets
+ threshold = threshold[:, a:b, c:d]
+
+ above_local = enhanced_planes_raw > threshold
+ above = torch.logical_and(above_global, above_local)
+ else:
+ above = above_global

- above = enhanced_planes > threshold
  planes[above] = threshold_value
  # subsequent steps only care about the values that are set to threshold or
  # above in planes. We set values in *planes* to threshold based on the
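The heart of the new tiled threshold is the F.unfold / torch.std_mean / repeat_interleave round trip added above: overlapping tiles are extracted as columns, a mean-plus-k-standard-deviations threshold is computed per tile, and the per-tile thresholds are expanded back to pixel resolution before being combined with the global per-plane threshold. Below is a small self-contained sketch of that mechanism; it deliberately skips the border offset/cropping handled by the `rem` logic above and uses made-up sizes.

```python
import torch
import torch.nn.functional as F

# One 2D "plane" with shape (z=1, y=128, x=128): noisy background + bright blob.
torch.manual_seed(0)
plane = torch.randn(1, 128, 128)
plane[:, 40:48, 40:48] += 5.0

tile_size = 32           # plays the role of local_threshold_tile_size_px
stride = tile_size // 2  # 50% overlap between neighbouring tiles

# unfold -> (z, tile_area, n_tiles): each column holds one overlapping tile
tiles = F.unfold(plane.unsqueeze(1), (tile_size, tile_size), stride=stride)

# per-tile statistics, then a per-tile local threshold
std, mean = torch.std_mean(tiles, dim=1, keepdim=True)
tile_thresh = mean + 10.0 * std  # 10 ~ n_sds_above_mean_tiled_thresh

# lay the tiles back out on an (n_y_tiles, n_x_tiles) grid
n_y = (128 - tile_size) // stride + 1
n_x = (128 - tile_size) // stride + 1
tile_thresh = tile_thresh.reshape(1, n_y, n_x)

# expand each tile's threshold back toward pixel resolution (one stride of
# pixels per tile; the real filter additionally pads/crops the image borders)
pixel_thresh = tile_thresh.repeat_interleave(stride, dim=1)
pixel_thresh = pixel_thresh.repeat_interleave(stride, dim=2)

print(tiles.shape, tile_thresh.shape, pixel_thresh.shape)
# torch.Size([1, 1024, 49]) torch.Size([1, 7, 7]) torch.Size([1, 112, 112])
```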