cellfinder 1.1.3__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries. Removed lines whose text was cut off by the diff viewer are marked with an ellipsis (…) below.
Potentially problematic release. This version of cellfinder might be problematic.
- cellfinder/__init__.py +21 -12
- cellfinder/core/classify/classify.py +13 -6
- cellfinder/core/classify/cube_generator.py +27 -11
- cellfinder/core/classify/resnet.py +9 -6
- cellfinder/core/classify/tools.py +13 -11
- cellfinder/core/detect/detect.py +12 -1
- cellfinder/core/detect/filters/volume/ball_filter.py +198 -113
- cellfinder/core/detect/filters/volume/structure_detection.py +105 -41
- cellfinder/core/detect/filters/volume/structure_splitting.py +1 -1
- cellfinder/core/detect/filters/volume/volume_filter.py +48 -49
- cellfinder/core/download/cli.py +39 -32
- cellfinder/core/download/download.py +44 -56
- cellfinder/core/main.py +53 -68
- cellfinder/core/tools/prep.py +12 -20
- cellfinder/core/tools/source_files.py +5 -3
- cellfinder/core/tools/system.py +10 -0
- cellfinder/core/train/train_yml.py +29 -27
- cellfinder/napari/curation.py +1 -1
- cellfinder/napari/detect/detect.py +259 -58
- cellfinder/napari/detect/detect_containers.py +11 -1
- cellfinder/napari/detect/thread_worker.py +16 -2
- cellfinder/napari/train/train.py +2 -9
- cellfinder/napari/train/train_containers.py +3 -3
- cellfinder/napari/utils.py +88 -47
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/METADATA +12 -11
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/RECORD +30 -34
- cellfinder/core/download/models.py +0 -49
- cellfinder/core/tools/IO.py +0 -48
- cellfinder/core/tools/tf.py +0 -46
- cellfinder/napari/images/brainglobe.png +0 -0
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/LICENSE +0 -0
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/WHEEL +0 -0
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/entry_points.txt +0 -0
- {cellfinder-1.1.3.dist-info → cellfinder-1.3.0.dist-info}/top_level.txt +0 -0
cellfinder/__init__.py
CHANGED
@@ -1,24 +1,33 @@
+import os
 from importlib.metadata import PackageNotFoundError, version
+from pathlib import Path
 
+# Check cellfinder is installed
 try:
     __version__ = version("cellfinder")
 except PackageNotFoundError as e:
     raise PackageNotFoundError("cellfinder package not installed") from e
 
-# If …
+# If Keras is not present, tools cannot be used.
 # Throw an error in this case to prevent invocation of functions.
 try:
-    …
+    KERAS_VERSION = version("keras")
 except PackageNotFoundError as e:
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    raise PackageNotFoundError(
+        f"cellfinder tools cannot be invoked without Keras. "
+        f"Please install Keras with a backend into your environment "
+        f"to use cellfinder tools. "
+        f"For more information on Keras backends, please see "
+        f"https://keras.io/getting_started/#installing-keras-3."
+        f"For more information on brainglobe, please see "
+        f"https://github.com/brainglobe/brainglobe-meta#readme."
+    ) from e
+
+
+# Set the Keras backend to torch
+os.environ["KERAS_BACKEND"] = "torch"
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
-__author__ = "Adam Tyson, Christian Niedworok, Charly Rousseau"
 __license__ = "BSD-3-Clause"
+
+DEFAULT_CELLFINDER_DIRECTORY = Path.home() / ".brainglobe" / "cellfinder"
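Note: the ordering in the new `__init__.py` matters. Keras 3 reads the `KERAS_BACKEND` environment variable once, when `keras` is first imported, so the variable has to be set before any cellfinder module imports Keras. A minimal standalone sketch of the same pattern (not cellfinder code):

    import os

    # Must happen before the first `import keras`; Keras 3 reads the
    # variable at import time, so setting it afterwards has no effect.
    os.environ["KERAS_BACKEND"] = "torch"

    import keras  # noqa: E402  (deliberately imported after the env var)

    print(keras.backend.backend())  # "torch", if the torch backend is installed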
cellfinder/core/classify/classify.py
CHANGED
@@ -1,10 +1,11 @@
 import os
+from datetime import datetime
 from typing import Any, Callable, Dict, List, Optional, Tuple
 
+import keras
 import numpy as np
 from brainglobe_utils.cells.cells import Cell
 from brainglobe_utils.general.system import get_num_processes
-from tensorflow import keras
 
 from cellfinder.core import logger, types
 from cellfinder.core.classify.cube_generator import CubeGeneratorFromFile
@@ -48,9 +49,9 @@ def main(
     callbacks = None
 
     # Too many workers doesn't increase speed, and uses huge amounts of RAM
-    workers = get_num_processes(
-        min_free_cpu_cores=n_free_cpus
-    )
+    workers = get_num_processes(min_free_cpu_cores=n_free_cpus)
+
+    start_time = datetime.now()
 
     logger.debug("Initialising cube generator")
     inference_generator = CubeGeneratorFromFile(
@@ -63,6 +64,8 @@
         cube_width=cube_width,
         cube_height=cube_height,
         cube_depth=cube_depth,
+        use_multiprocessing=False,
+        workers=workers,
     )
 
     model = get_model(
@@ -73,10 +76,9 @@
     )
 
     logger.info("Running inference")
+    # in Keras 3.0 multiprocessing params are specified in the generator
     predictions = model.predict(
         inference_generator,
-        use_multiprocessing=True,
-        workers=workers,
         verbose=True,
         callbacks=callbacks,
     )
@@ -91,6 +93,11 @@
         cell.type = predictions[idx] + 1
         points_list.append(cell)
 
+    time_elapsed = datetime.now() - start_time
+    print(
+        "Classfication complete - all points done in : {}".format(time_elapsed)
+    )
+
     return points_list
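Note: the `use_multiprocessing`/`workers` move out of `model.predict` reflects a Keras 3 API change: parallel data loading is configured on the data object itself (`keras.utils.PyDataset`, whose `Sequence` alias cellfinder subclasses), not on the predict/fit call. A minimal sketch of the pattern, assuming Keras 3 (toy class, not cellfinder's generator):

    import keras
    import numpy as np

    class RandomCubes(keras.utils.PyDataset):
        # Toy stand-in for CubeGeneratorFromFile: random float32 batches.
        def __init__(self, n_batches=4, batch_size=64, **kwargs):
            # workers= / use_multiprocessing= are consumed by PyDataset itself
            super().__init__(**kwargs)
            self.n_batches = n_batches
            self.batch_size = batch_size

        def __len__(self):
            return self.n_batches

        def __getitem__(self, idx):
            return np.random.rand(
                self.batch_size, 50, 50, 20, 2
            ).astype(np.float32)

    generator = RandomCubes(workers=4, use_multiprocessing=False)
    # predictions = model.predict(generator, verbose=True)  # no workers= here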
cellfinder/core/classify/cube_generator.py
CHANGED
@@ -2,13 +2,13 @@ from pathlib import Path
 from random import shuffle
 from typing import Dict, List, Optional, Tuple, Union
 
+import keras
 import numpy as np
-import tensorflow as tf
 from brainglobe_utils.cells.cells import Cell, group_cells_by_z
 from brainglobe_utils.general.numerical import is_even
+from keras.utils import Sequence
 from scipy.ndimage import zoom
 from skimage.io import imread
-from tensorflow.keras.utils import Sequence
 
 from cellfinder.core import types
 from cellfinder.core.classify.augment import AugmentationParameters, augment
@@ -40,7 +40,7 @@ class CubeGeneratorFromFile(Sequence):
         background_array: types.array,
         voxel_sizes: Tuple[int, int, int],
         network_voxel_sizes: Tuple[int, int, int],
-        batch_size: int = …,
+        batch_size: int = 64,
         cube_width: int = 50,
         cube_height: int = 50,
         cube_depth: int = 20,
@@ -56,7 +56,14 @@ class CubeGeneratorFromFile(Sequence):
         translate: Tuple[float, float, float] = (0.05, 0.05, 0.05),
         shuffle: bool = False,
         interpolation_order: int = 2,
+        *args,
+        **kwargs,
     ):
+        # pass any additional arguments not specified in signature to the
+        # constructor of the superclass (e.g.: `use_multiprocessing` or
+        # `workers`)
+        super().__init__(*args, **kwargs)
+
         self.points = points
         self.signal_array = signal_array
         self.background_array = background_array
@@ -218,10 +225,10 @@ class CubeGeneratorFromFile(Sequence):
 
         if self.train:
             batch_labels = [cell.type - 1 for cell in cell_batch]
-            batch_labels = tf.keras.utils.to_categorical(
+            batch_labels = keras.utils.to_categorical(
                 batch_labels, num_classes=self.classes
             )
-            return images, batch_labels
+            return images, batch_labels.astype(np.float32)
         elif self.extract:
             batch_info = self.__get_batch_dict(cell_batch)
             return images, batch_info
@@ -252,7 +259,8 @@ class CubeGeneratorFromFile(Sequence):
             (number_images,)
             + (self.cube_height, self.cube_width, self.cube_depth)
             + (self.channels,)
-            )
+            ),
+            dtype=np.float32,
         )
 
         for idx, cell in enumerate(cell_batch):
@@ -337,7 +345,7 @@ class CubeGeneratorFromDisk(Sequence):
         signal_list: List[Union[str, Path]],
         background_list: List[Union[str, Path]],
         labels: Optional[List[int]] = None,  # only if training or validating
-        batch_size: int = …,
+        batch_size: int = 64,
         shape: Tuple[int, int, int] = (50, 50, 20),
         channels: int = 2,
         classes: int = 2,
@@ -350,7 +358,14 @@ class CubeGeneratorFromDisk(Sequence):
         translate: Tuple[float, float, float] = (0.2, 0.2, 0.2),
         train: bool = False,  # also return labels
         interpolation_order: int = 2,
+        *args,
+        **kwargs,
     ):
+        # pass any additional arguments not specified in signature to the
+        # constructor of the superclass (e.g.: `use_multiprocessing` or
+        # `workers`)
+        super().__init__(*args, **kwargs)
+
         self.im_shape = shape
         self.batch_size = batch_size
         self.labels = labels
@@ -410,10 +425,10 @@ class CubeGeneratorFromDisk(Sequence):
 
         if self.train and self.labels is not None:
             batch_labels = [self.labels[k] for k in indexes]
-            batch_labels = tf.keras.utils.to_categorical(
+            batch_labels = keras.utils.to_categorical(
                 batch_labels, num_classes=self.classes
             )
-            return images, batch_labels
+            return images, batch_labels.astype(np.float32)
         else:
             return images
@@ -424,7 +439,8 @@ class CubeGeneratorFromDisk(Sequence):
     ) -> np.ndarray:
         number_images = len(list_signal_tmp)
         images = np.empty(
-            ((number_images,) + self.im_shape + (self.channels,))
+            ((number_images,) + self.im_shape + (self.channels,)),
+            dtype=np.float32,
         )
 
         for idx, signal_im in enumerate(list_signal_tmp):
@@ -433,7 +449,7 @@ class CubeGeneratorFromDisk(Sequence):
             images, idx, signal_im, background_im
         )
 
-        return images
+        return images
 
     def __populate_array_with_cubes(
         self,
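Note: the new `dtype=np.float32` allocations and the `.astype(np.float32)` on the label arrays keep every batch in the 32-bit dtype the torch-backed model expects; `keras.utils.to_categorical` can hand back a wider (float64) NumPy array, so the cast is made explicit. A small sketch of the label path (illustrative values only):

    import keras
    import numpy as np

    labels = [0, 1, 1]  # cell types shifted to 0-based, as in the generators
    one_hot = keras.utils.to_categorical(labels, num_classes=2)
    # Mirror the diff: force the label dtype to match the float32 image cubes,
    # whatever dtype to_categorical returned.
    one_hot = one_hot.astype(np.float32)
    print(one_hot.shape, one_hot.dtype)  # (3, 2) float32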
cellfinder/core/classify/resnet.py
CHANGED
@@ -1,9 +1,11 @@
 from typing import Callable, Dict, List, Literal, Optional, Tuple, Union
 
-from tensorflow import Tensor
-from tensorflow.keras import Model
-from tensorflow.keras.initializers import Initializer
-from tensorflow.keras.layers import (
+from keras import (
+    KerasTensor as Tensor,
+)
+from keras import Model
+from keras.initializers import Initializer
+from keras.layers import (
     Activation,
     Add,
     BatchNormalization,
@@ -14,7 +16,7 @@ from tensorflow.keras.layers import (
     MaxPooling3D,
     ZeroPadding3D,
 )
-from tensorflow.keras.optimizers import Adam, Optimizer
+from keras.optimizers import Adam, Optimizer
 
 #####################################################################
 # Define the types of ResNet
@@ -113,7 +115,7 @@ def non_residual_block(
     activation: str = "relu",
     use_bias: bool = False,
     bn_epsilon: float = 1e-5,
-    pooling_padding: str = "same",
+    pooling_padding: str = "valid",
     axis: int = 3,
 ) -> Tensor:
     """
@@ -131,6 +133,7 @@ def non_residual_block(
     )(x)
     x = BatchNormalization(axis=axis, epsilon=bn_epsilon, name="conv1_bn")(x)
     x = Activation(activation, name="conv1_activation")(x)
+
     x = MaxPooling3D(
         max_pool_size,
         strides=strides,
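Note: the `pooling_padding` default flips from "same" to "valid", which changes output shapes: "valid" drops incomplete pooling windows, while "same" pads so the output covers ceil(input / stride). A quick shape check, assuming Keras 3 with any installed backend:

    import keras
    import numpy as np

    x = np.zeros((1, 7, 7, 7, 1), dtype=np.float32)
    pool_valid = keras.layers.MaxPooling3D(2, strides=2, padding="valid")(x)
    pool_same = keras.layers.MaxPooling3D(2, strides=2, padding="same")(x)
    print(pool_valid.shape)  # (1, 3, 3, 3, 1): incomplete windows dropped
    print(pool_same.shape)   # (1, 4, 4, 4, 1): padded up to ceil(7 / 2)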
cellfinder/core/classify/tools.py
CHANGED
@@ -1,9 +1,10 @@
 import os
-from typing import List, Optional, Sequence, Tuple, Union
+from collections.abc import Sequence
+from typing import List, Optional, Tuple, Union
 
+import keras
 import numpy as np
-
-from tensorflow.keras import Model
+from keras import Model
 
 from cellfinder.core import logger
 from cellfinder.core.classify.resnet import build_model, layer_type
@@ -17,8 +18,7 @@ def get_model(
     inference: bool = False,
     continue_training: bool = False,
 ) -> Model:
-    """
-    Returns the correct model based on the arguments passed
+    """Returns the correct model based on the arguments passed
     :param existing_model: An existing, trained model. This is returned if it
         exists
     :param model_weights: This file is used to set the model weights if it
@@ -30,29 +30,31 @@ def get_model(
         by using the default one
     :param continue_training: If True, will ensure that a trained model
         exists. E.g. by using the default one
-    :return: A …
+    :return: A keras model
 
     """
     if existing_model is not None or network_depth is None:
         logger.debug(f"Loading model: {existing_model}")
-        return …
+        return keras.models.load_model(existing_model)
     else:
         logger.debug(f"Creating a new instance of model: {network_depth}")
         model = build_model(
-            network_depth=network_depth, …
+            network_depth=network_depth,
+            learning_rate=learning_rate,
         )
        if inference or continue_training:
             logger.debug(
-                f"Setting model weights according to: {model_weights}"
+                f"Setting model weights according to: {model_weights}",
             )
             if model_weights is None:
-                raise …
+                raise OSError("`model_weights` must be provided")
             model.load_weights(model_weights)
         return model
 
 
 def make_lists(
-    tiff_files: Sequence,
+    tiff_files: Sequence,
+    train: bool = True,
 ) -> Union[Tuple[List, List], Tuple[List, List, np.ndarray]]:
     signal_list = []
     background_list = []
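Note: `get_model` keeps two distinct loading paths, and the new `OSError` guards the second one: `keras.models.load_model` restores a whole serialized model (architecture plus weights), while `model.load_weights` only fills weights into a freshly built architecture and so cannot proceed without a weights file. A condensed sketch of that branching (hypothetical stand-in model, not cellfinder's ResNet):

    import keras

    def get_model_sketch(existing_model=None, model_weights=None,
                         inference=False):
        if existing_model is not None:
            # Whole model from disk: architecture + weights.
            return keras.models.load_model(existing_model)
        # Hypothetical stand-in for cellfinder's build_model()
        model = keras.Sequential(
            [keras.layers.Input(shape=(4,)), keras.layers.Dense(2)]
        )
        if inference:
            if model_weights is None:
                raise OSError("`model_weights` must be provided")
            # Weights only, loaded into the freshly built architecture.
            model.load_weights(model_weights)
        return model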
cellfinder/core/detect/detect.py
CHANGED
@@ -22,6 +22,7 @@ from typing import Callable, List, Optional, Sequence, Tuple, TypeVar
 import numpy as np
 from brainglobe_utils.cells.cells import Cell
 from brainglobe_utils.general.system import get_num_processes
+from numba import set_num_threads
 
 from cellfinder.core import logger, types
 from cellfinder.core.detect.filters.plane import TileProcessor
@@ -157,6 +158,13 @@ def main(
     )
     n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
     n_ball_procs = max(n_processes - 1, 1)
+
+    # we parallelize 2d filtering, which typically lags behind the 3d
+    # processing so for n_ball_procs 2d filtering threads, ball_z_size will
+    # typically be in use while the others stall waiting for 3d processing
+    # so we can use those for other things, such as numba threading
+    set_num_threads(max(n_ball_procs - int(ball_z_size), 1))
+
     start_time = datetime.now()
 
     (
@@ -236,7 +244,10 @@
     # then 3D filtering has finished. As batches of planes are filtered
     # by the 3D filter, it releases the locks of subsequent 2D filter
     # processes.
-    cells = mp_3d_filter.process(…)
+    mp_3d_filter.process(async_results, locks, callback=callback)
+
+    # it's now done filtering, get results with pool
+    cells = mp_3d_filter.get_results(worker_pool)
 
     time_elapsed = datetime.now() - start_time
     logger.debug(
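Note: `numba.set_num_threads` re-budgets, at runtime, how many threads numba's parallel kernels may use; the value must not exceed the pool numba started with (`numba.config.NUMBA_NUM_THREADS`). The `set_num_threads` hunk above lends the 2D-filter workers expected to stall on the 3D filter to numba instead. A sketch of the same budgeting with hypothetical numbers:

    from numba import config, set_num_threads

    n_ball_procs = 12  # hypothetical worker count, not cellfinder's default
    ball_z_size = 2    # hypothetical ball depth in planes
    # Roughly ball_z_size 2D-filter threads stay busy; the rest would stall
    # waiting on the 3D filter, so lend those cores to numba's kernels.
    n_numba = max(n_ball_procs - int(ball_z_size), 1)
    set_num_threads(min(n_numba, config.NUMBA_NUM_THREADS))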