careamics 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of careamics has been flagged by the registry; see the registry page for details.
- careamics/__init__.py +6 -1
- careamics/careamist.py +729 -0
- careamics/config/__init__.py +39 -0
- careamics/config/architectures/__init__.py +17 -0
- careamics/config/architectures/architecture_model.py +37 -0
- careamics/config/architectures/custom_model.py +162 -0
- careamics/config/architectures/lvae_model.py +174 -0
- careamics/config/architectures/register_model.py +103 -0
- careamics/config/architectures/unet_model.py +118 -0
- careamics/config/callback_model.py +123 -0
- careamics/config/configuration_factory.py +583 -0
- careamics/config/configuration_model.py +604 -0
- careamics/config/data_model.py +527 -0
- careamics/config/fcn_algorithm_model.py +147 -0
- careamics/config/inference_model.py +239 -0
- careamics/config/likelihood_model.py +43 -0
- careamics/config/nm_model.py +101 -0
- careamics/config/optimizer_models.py +187 -0
- careamics/config/references/__init__.py +45 -0
- careamics/config/references/algorithm_descriptions.py +132 -0
- careamics/config/references/references.py +39 -0
- careamics/config/support/__init__.py +31 -0
- careamics/config/support/supported_activations.py +27 -0
- careamics/config/support/supported_algorithms.py +33 -0
- careamics/config/support/supported_architectures.py +17 -0
- careamics/config/support/supported_data.py +109 -0
- careamics/config/support/supported_loggers.py +10 -0
- careamics/config/support/supported_losses.py +29 -0
- careamics/config/support/supported_optimizers.py +57 -0
- careamics/config/support/supported_pixel_manipulations.py +15 -0
- careamics/config/support/supported_struct_axis.py +21 -0
- careamics/config/support/supported_transforms.py +11 -0
- careamics/config/tile_information.py +65 -0
- careamics/config/training_model.py +72 -0
- careamics/config/transformations/__init__.py +15 -0
- careamics/config/transformations/n2v_manipulate_model.py +64 -0
- careamics/config/transformations/normalize_model.py +60 -0
- careamics/config/transformations/transform_model.py +45 -0
- careamics/config/transformations/xy_flip_model.py +43 -0
- careamics/config/transformations/xy_random_rotate90_model.py +35 -0
- careamics/config/vae_algorithm_model.py +171 -0
- careamics/config/validators/__init__.py +5 -0
- careamics/config/validators/validator_utils.py +101 -0
- careamics/conftest.py +39 -0
- careamics/dataset/__init__.py +17 -0
- careamics/dataset/dataset_utils/__init__.py +19 -0
- careamics/dataset/dataset_utils/dataset_utils.py +101 -0
- careamics/dataset/dataset_utils/file_utils.py +141 -0
- careamics/dataset/dataset_utils/iterate_over_files.py +83 -0
- careamics/dataset/dataset_utils/running_stats.py +186 -0
- careamics/dataset/in_memory_dataset.py +310 -0
- careamics/dataset/in_memory_pred_dataset.py +88 -0
- careamics/dataset/in_memory_tiled_pred_dataset.py +129 -0
- careamics/dataset/iterable_dataset.py +295 -0
- careamics/dataset/iterable_pred_dataset.py +122 -0
- careamics/dataset/iterable_tiled_pred_dataset.py +140 -0
- careamics/dataset/patching/__init__.py +1 -0
- careamics/dataset/patching/patching.py +299 -0
- careamics/dataset/patching/random_patching.py +201 -0
- careamics/dataset/patching/sequential_patching.py +212 -0
- careamics/dataset/patching/validate_patch_dimension.py +64 -0
- careamics/dataset/tiling/__init__.py +10 -0
- careamics/dataset/tiling/collate_tiles.py +33 -0
- careamics/dataset/tiling/lvae_tiled_patching.py +282 -0
- careamics/dataset/tiling/tiled_patching.py +164 -0
- careamics/dataset/zarr_dataset.py +151 -0
- careamics/file_io/__init__.py +15 -0
- careamics/file_io/read/__init__.py +12 -0
- careamics/file_io/read/get_func.py +56 -0
- careamics/file_io/read/tiff.py +58 -0
- careamics/file_io/read/zarr.py +60 -0
- careamics/file_io/write/__init__.py +15 -0
- careamics/file_io/write/get_func.py +63 -0
- careamics/file_io/write/tiff.py +40 -0
- careamics/lightning/__init__.py +18 -0
- careamics/lightning/callbacks/__init__.py +11 -0
- careamics/lightning/callbacks/hyperparameters_callback.py +49 -0
- careamics/lightning/callbacks/prediction_writer_callback/__init__.py +20 -0
- careamics/lightning/callbacks/prediction_writer_callback/file_path_utils.py +56 -0
- careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +233 -0
- careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +398 -0
- careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +215 -0
- careamics/lightning/callbacks/progress_bar_callback.py +90 -0
- careamics/lightning/lightning_module.py +632 -0
- careamics/lightning/predict_data_module.py +333 -0
- careamics/lightning/train_data_module.py +680 -0
- careamics/losses/__init__.py +15 -0
- careamics/losses/fcn/__init__.py +1 -0
- careamics/losses/fcn/losses.py +98 -0
- careamics/losses/loss_factory.py +155 -0
- careamics/losses/lvae/__init__.py +1 -0
- careamics/losses/lvae/loss_utils.py +83 -0
- careamics/losses/lvae/losses.py +445 -0
- careamics/lvae_training/__init__.py +0 -0
- careamics/lvae_training/dataset/__init__.py +0 -0
- careamics/lvae_training/dataset/data_utils.py +701 -0
- careamics/lvae_training/dataset/lc_dataset.py +259 -0
- careamics/lvae_training/dataset/lc_dataset_config.py +13 -0
- careamics/lvae_training/dataset/vae_data_config.py +179 -0
- careamics/lvae_training/dataset/vae_dataset.py +1054 -0
- careamics/lvae_training/eval_utils.py +905 -0
- careamics/lvae_training/get_config.py +84 -0
- careamics/lvae_training/lightning_module.py +701 -0
- careamics/lvae_training/metrics.py +214 -0
- careamics/lvae_training/train_lvae.py +342 -0
- careamics/lvae_training/train_utils.py +121 -0
- careamics/model_io/__init__.py +7 -0
- careamics/model_io/bioimage/__init__.py +11 -0
- careamics/model_io/bioimage/_readme_factory.py +121 -0
- careamics/model_io/bioimage/bioimage_utils.py +52 -0
- careamics/model_io/bioimage/model_description.py +327 -0
- careamics/model_io/bmz_io.py +246 -0
- careamics/model_io/model_io_utils.py +95 -0
- careamics/models/__init__.py +5 -0
- careamics/models/activation.py +39 -0
- careamics/models/layers.py +493 -0
- careamics/models/lvae/__init__.py +3 -0
- careamics/models/lvae/layers.py +1998 -0
- careamics/models/lvae/likelihoods.py +364 -0
- careamics/models/lvae/lvae.py +901 -0
- careamics/models/lvae/noise_models.py +541 -0
- careamics/models/lvae/utils.py +395 -0
- careamics/models/model_factory.py +67 -0
- careamics/models/unet.py +443 -0
- careamics/prediction_utils/__init__.py +10 -0
- careamics/prediction_utils/lvae_prediction.py +158 -0
- careamics/prediction_utils/lvae_tiling_manager.py +362 -0
- careamics/prediction_utils/prediction_outputs.py +135 -0
- careamics/prediction_utils/stitch_prediction.py +112 -0
- careamics/transforms/__init__.py +20 -0
- careamics/transforms/compose.py +107 -0
- careamics/transforms/n2v_manipulate.py +146 -0
- careamics/transforms/normalize.py +243 -0
- careamics/transforms/pixel_manipulation.py +407 -0
- careamics/transforms/struct_mask_parameters.py +20 -0
- careamics/transforms/transform.py +24 -0
- careamics/transforms/tta.py +88 -0
- careamics/transforms/xy_flip.py +123 -0
- careamics/transforms/xy_random_rotate90.py +101 -0
- careamics/utils/__init__.py +19 -0
- careamics/utils/autocorrelation.py +40 -0
- careamics/utils/base_enum.py +60 -0
- careamics/utils/context.py +66 -0
- careamics/utils/logging.py +322 -0
- careamics/utils/metrics.py +188 -0
- careamics/utils/path_utils.py +26 -0
- careamics/utils/ram.py +15 -0
- careamics/utils/receptive_field.py +108 -0
- careamics/utils/torch_utils.py +127 -0
- careamics-0.0.3.dist-info/METADATA +78 -0
- careamics-0.0.3.dist-info/RECORD +154 -0
- {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/WHEEL +1 -1
- {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/licenses/LICENSE +1 -1
- careamics-0.0.1.dist-info/METADATA +0 -46
- careamics-0.0.1.dist-info/RECORD +0 -6
careamics/config/references/algorithm_descriptions.py (new file)
@@ -0,0 +1,132 @@
"""Descriptions of the algorithms used in CAREamics."""

from pydantic import BaseModel

CUSTOM = "Custom"
N2V = "Noise2Void"
N2V2 = "N2V2"
STRUCT_N2V = "StructN2V"
STRUCT_N2V2 = "StructN2V2"
N2N = "Noise2Noise"
CARE = "CARE"


N2V_DESCRIPTION = (
    "Noise2Void is a UNet-based self-supervised algorithm that "
    "uses blind-spot training to denoise images. In short, in every "
    "patch during training, random pixels are selected and their "
    "values replaced by neighboring pixel values. The network is then "
    "trained to predict the original pixel values. The algorithm "
    "relies on the continuity of the signal (neighboring pixels have "
    "similar values) and the pixel-wise independence of the noise "
    "(the noise in a pixel is not correlated with the noise in "
    "neighboring pixels)."
)


class AlgorithmDescription(BaseModel):
    """Description of an algorithm.

    Attributes
    ----------
    description : str
        Description of the algorithm.
    """

    description: str


class N2VDescription(AlgorithmDescription):
    """Description of Noise2Void.

    Attributes
    ----------
    description : str
        Description of Noise2Void.
    """

    description: str = N2V_DESCRIPTION


class N2V2Description(AlgorithmDescription):
    """Description of N2V2.

    Attributes
    ----------
    description : str
        Description of N2V2.
    """

    description: str = (
        "N2V2 is a variant of Noise2Void. "
        + N2V_DESCRIPTION
        + "\nN2V2 introduces blur-pool layers and removes skip "
        "connections in the UNet architecture to remove checkerboard "
        "artefacts, a common artefact occurring in Noise2Void."
    )


class StructN2VDescription(AlgorithmDescription):
    """Description of StructN2V.

    Attributes
    ----------
    description : str
        Description of StructN2V.
    """

    description: str = (
        "StructN2V is a variant of Noise2Void. "
        + N2V_DESCRIPTION
        + "\nStructN2V uses a linear mask (horizontal or vertical) to replace "
        "the pixel values of neighbors of the masked pixels by a random "
        "value. Such masking allows removing 1D structured noise from "
        "the images, the main failure case of the original N2V."
    )


class StructN2V2Description(AlgorithmDescription):
    """Description of StructN2V2.

    Attributes
    ----------
    description : str
        Description of StructN2V2.
    """

    description: str = (
        "StructN2V2 is a variant of Noise2Void that uses both "
        "structN2V and N2V2. "
        + N2V_DESCRIPTION
        + "\nStructN2V2 uses a linear mask (horizontal or vertical) to replace "
        "the pixel values of neighbors of the masked pixels by a random "
        "value. Such masking allows removing 1D structured noise from "
        "the images, the main failure case of the original N2V."
        "\nN2V2 introduces blur-pool layers and removes skip connections in "
        "the UNet architecture to remove checkerboard artefacts, a common "
        "artefact occurring in Noise2Void."
    )


class N2NDescription(AlgorithmDescription):
    """Description of Noise2Noise.

    Attributes
    ----------
    description : str
        Description of Noise2Noise.
    """

    description: str = "Noise2Noise"  # TODO


class CAREDescription(AlgorithmDescription):
    """Description of CARE.

    Attributes
    ----------
    description : str
        Description of CARE.
    """

    description: str = "CARE"  # TODO
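N2V_DESCRIPTION above explains blind-spot training in prose. As a rough illustration of the masking step it describes, here is a minimal NumPy sketch assuming 2D single-channel patches; the function name, mask fraction and window radius are invented for the example and this is not the careamics implementation (which lives in careamics/transforms/pixel_manipulation.py).

import numpy as np


def uniform_blind_spot_mask(patch: np.ndarray, mask_fraction: float = 0.002, radius: int = 2):
    """Illustrative sketch: replace a few random pixels by random neighbor values.

    Returns the manipulated patch, the masked coordinates and their original
    values (the training targets used by the blind-spot loss).
    """
    rng = np.random.default_rng()
    h, w = patch.shape
    n_masked = max(1, int(mask_fraction * patch.size))
    ys = rng.integers(0, h, n_masked)
    xs = rng.integers(0, w, n_masked)
    original_values = patch[ys, xs].copy()

    masked = patch.copy()
    for y, x in zip(ys, xs):
        # pick a random neighbor inside a (2*radius+1)^2 window, excluding the center
        while True:
            dy, dx = rng.integers(-radius, radius + 1, 2)
            if (dy, dx) != (0, 0):
                break
        ny = np.clip(y + dy, 0, h - 1)
        nx = np.clip(x + dx, 0, w - 1)
        masked[y, x] = patch[ny, nx]

    # the loss is then computed only on the masked coordinates, comparing the
    # network output at (ys, xs) with `original_values`
    return masked, (ys, xs), original_values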
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"""References for the CAREamics algorithms."""
|
|
2
|
+
|
|
3
|
+
from bioimageio.spec.generic.v0_3 import CiteEntry
|
|
4
|
+
|
|
5
|
+
N2VRef = CiteEntry(
|
|
6
|
+
text='Krull, A., Buchholz, T.O. and Jug, F., 2019. "Noise2Void - Learning '
|
|
7
|
+
'denoising from single noisy images". In Proceedings of the IEEE/CVF '
|
|
8
|
+
"conference on computer vision and pattern recognition (pp. 2129-2137).",
|
|
9
|
+
doi="10.1109/cvpr.2019.00223",
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
N2V2Ref = CiteEntry(
|
|
13
|
+
text="Höck, E., Buchholz, T.O., Brachmann, A., Jug, F. and Freytag, A., "
|
|
14
|
+
'2022. "N2V2 - Fixing Noise2Void checkerboard artifacts with modified '
|
|
15
|
+
'sampling strategies and a tweaked network architecture". In European '
|
|
16
|
+
"Conference on Computer Vision (pp. 503-518).",
|
|
17
|
+
doi="10.1007/978-3-031-25069-9_33",
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
StructN2VRef = CiteEntry(
|
|
21
|
+
text="Broaddus, C., Krull, A., Weigert, M., Schmidt, U. and Myers, G., 2020."
|
|
22
|
+
'"Removing structured noise with self-supervised blind-spot '
|
|
23
|
+
'networks". In 2020 IEEE 17th International Symposium on Biomedical '
|
|
24
|
+
"Imaging (ISBI) (pp. 159-163).",
|
|
25
|
+
doi="10.1109/isbi45749.2020.9098336",
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
N2NRef = CiteEntry(
|
|
29
|
+
text="Lehtinen, J., Munkberg, J., Hasselgren, J., Laine, S., Karras, T., "
|
|
30
|
+
'Aittala, M. and Aila, T., 2018. "Noise2Noise: Learning image restoration '
|
|
31
|
+
'without clean data". arXiv preprint arXiv:1803.04189.',
|
|
32
|
+
doi="10.48550/arXiv.1803.04189",
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
CARERef = CiteEntry(
|
|
36
|
+
text='Weigert, Martin, et al. "Content-aware image restoration: pushing the '
|
|
37
|
+
'limits of fluorescence microscopy." Nature methods 15.12 (2018): 1090-1097.',
|
|
38
|
+
doi="10.1038/s41592-018-0216-7",
|
|
39
|
+
)
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"""Supported configuration options.
|
|
2
|
+
|
|
3
|
+
Used throughout the code to ensure consistency. These should be kept in sync with the
|
|
4
|
+
corresponding configuration options in the Pydantic models.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"SupportedArchitecture",
|
|
9
|
+
"SupportedActivation",
|
|
10
|
+
"SupportedOptimizer",
|
|
11
|
+
"SupportedScheduler",
|
|
12
|
+
"SupportedLoss",
|
|
13
|
+
"SupportedAlgorithm",
|
|
14
|
+
"SupportedPixelManipulation",
|
|
15
|
+
"SupportedTransform",
|
|
16
|
+
"SupportedData",
|
|
17
|
+
"SupportedStructAxis",
|
|
18
|
+
"SupportedLogger",
|
|
19
|
+
]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
from .supported_activations import SupportedActivation
|
|
23
|
+
from .supported_algorithms import SupportedAlgorithm
|
|
24
|
+
from .supported_architectures import SupportedArchitecture
|
|
25
|
+
from .supported_data import SupportedData
|
|
26
|
+
from .supported_loggers import SupportedLogger
|
|
27
|
+
from .supported_losses import SupportedLoss
|
|
28
|
+
from .supported_optimizers import SupportedOptimizer, SupportedScheduler
|
|
29
|
+
from .supported_pixel_manipulations import SupportedPixelManipulation
|
|
30
|
+
from .supported_struct_axis import SupportedStructAxis
|
|
31
|
+
from .supported_transforms import SupportedTransform
|
|
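Because every Supported* class also subclasses str, its members compare equal to their plain string values, which is what lets configuration fields accept either form. A small usage sketch, assuming the package is installed and BaseEnum behaves like a standard Enum:

from careamics.config.support import SupportedAlgorithm, SupportedData

# members double as plain strings
assert SupportedAlgorithm.N2V == "n2v"
assert SupportedData.TIFF.value == "tiff"

# standard enum lookup by value
assert SupportedAlgorithm("care") is SupportedAlgorithm.CARE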
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Activations supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedActivation(str, BaseEnum):
|
|
7
|
+
"""Supported activation functions.
|
|
8
|
+
|
|
9
|
+
- None, no activation will be used.
|
|
10
|
+
- Sigmoid
|
|
11
|
+
- Softmax
|
|
12
|
+
- Tanh
|
|
13
|
+
- ReLU
|
|
14
|
+
- LeakyReLU
|
|
15
|
+
|
|
16
|
+
All activations are defined in PyTorch.
|
|
17
|
+
|
|
18
|
+
See: https://pytorch.org/docs/stable/nn.html#loss-functions
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
NONE = "None"
|
|
22
|
+
SIGMOID = "Sigmoid"
|
|
23
|
+
SOFTMAX = "Softmax"
|
|
24
|
+
TANH = "Tanh"
|
|
25
|
+
RELU = "ReLU"
|
|
26
|
+
LEAKYRELU = "LeakyReLU"
|
|
27
|
+
ELU = "ELU"
|
|
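The enum values (apart from "None") match torch.nn class names, so an activation factory can resolve them by attribute lookup. The sketch below is an assumption about how such a factory could look, not the actual code in careamics/models/activation.py:

import torch.nn as nn

from careamics.config.support import SupportedActivation


def get_activation(name: str) -> nn.Module:
    """Illustrative sketch: map a SupportedActivation value to a torch.nn module."""
    if name == SupportedActivation.NONE:
        return nn.Identity()
    # "Sigmoid", "Softmax", "Tanh", "ReLU", "LeakyReLU" and "ELU" are all torch.nn classes
    return getattr(nn, SupportedActivation(name).value)()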
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Algorithms supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from careamics.utils import BaseEnum
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class SupportedAlgorithm(str, BaseEnum):
|
|
9
|
+
"""Algorithms available in CAREamics."""
|
|
10
|
+
|
|
11
|
+
N2V = "n2v"
|
|
12
|
+
"""Noise2Void algorithm, a self-supervised approach based on blind denoising."""
|
|
13
|
+
|
|
14
|
+
CARE = "care"
|
|
15
|
+
"""Content-aware image restoration, a supervised algorithm used for a variety
|
|
16
|
+
of tasks."""
|
|
17
|
+
|
|
18
|
+
N2N = "n2n"
|
|
19
|
+
"""Noise2Noise algorithm, a self-supervised denoising scheme based on comparing
|
|
20
|
+
noisy images of the same sample."""
|
|
21
|
+
|
|
22
|
+
MUSPLIT = "musplit"
|
|
23
|
+
"""An image splitting approach based on ladder VAE architectures."""
|
|
24
|
+
|
|
25
|
+
DENOISPLIT = "denoisplit"
|
|
26
|
+
"""An image splitting and denoising approach based on ladder VAE architectures."""
|
|
27
|
+
|
|
28
|
+
CUSTOM = "custom"
|
|
29
|
+
"""Custom algorithm, used for cases where a custom architecture is provided."""
|
|
30
|
+
|
|
31
|
+
# PN2V = "pn2v"
|
|
32
|
+
# HDN = "hdn"
|
|
33
|
+
# SEG = "segmentation"
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Architectures supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedArchitecture(str, BaseEnum):
|
|
7
|
+
"""Supported architectures."""
|
|
8
|
+
|
|
9
|
+
UNET = "UNet"
|
|
10
|
+
"""UNet architecture used with N2V, CARE and Noise2Noise."""
|
|
11
|
+
|
|
12
|
+
LVAE = "LVAE"
|
|
13
|
+
"""Ladder Variational Autoencoder used for muSplit and denoiSplit."""
|
|
14
|
+
|
|
15
|
+
CUSTOM = "custom"
|
|
16
|
+
"""Keyword used for custom architectures provided by users and only compatible
|
|
17
|
+
with `FCNAlgorithmConfig` configuration."""
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
"""Data supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Union
|
|
6
|
+
|
|
7
|
+
from careamics.utils import BaseEnum
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SupportedData(str, BaseEnum):
|
|
11
|
+
"""Supported data types.
|
|
12
|
+
|
|
13
|
+
Attributes
|
|
14
|
+
----------
|
|
15
|
+
ARRAY : str
|
|
16
|
+
Array data.
|
|
17
|
+
TIFF : str
|
|
18
|
+
TIFF image data.
|
|
19
|
+
CUSTOM : str
|
|
20
|
+
Custom data.
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
ARRAY = "array"
|
|
24
|
+
TIFF = "tiff"
|
|
25
|
+
CUSTOM = "custom"
|
|
26
|
+
# ZARR = "zarr"
|
|
27
|
+
|
|
28
|
+
# TODO remove?
|
|
29
|
+
@classmethod
|
|
30
|
+
def _missing_(cls, value: object) -> str:
|
|
31
|
+
"""
|
|
32
|
+
Override default behaviour for missing values.
|
|
33
|
+
|
|
34
|
+
This method is called when `value` is not found in the enum values. It converts
|
|
35
|
+
`value` to lowercase, removes "." if it is the first character and tries to
|
|
36
|
+
match it with enum values.
|
|
37
|
+
|
|
38
|
+
Parameters
|
|
39
|
+
----------
|
|
40
|
+
value : object
|
|
41
|
+
Value to be matched with enum values.
|
|
42
|
+
|
|
43
|
+
Returns
|
|
44
|
+
-------
|
|
45
|
+
str
|
|
46
|
+
Matched enum value.
|
|
47
|
+
"""
|
|
48
|
+
if isinstance(value, str):
|
|
49
|
+
lower_value = value.lower()
|
|
50
|
+
|
|
51
|
+
if lower_value.startswith("."):
|
|
52
|
+
lower_value = lower_value[1:]
|
|
53
|
+
|
|
54
|
+
# attempt to match lowercase value with enum values
|
|
55
|
+
for member in cls:
|
|
56
|
+
if member.value == lower_value:
|
|
57
|
+
return member
|
|
58
|
+
|
|
59
|
+
# still missing
|
|
60
|
+
return super()._missing_(value)
|
|
61
|
+
|
|
62
|
+
@classmethod
|
|
63
|
+
def get_extension_pattern(cls, data_type: Union[str, SupportedData]) -> str:
|
|
64
|
+
"""
|
|
65
|
+
Get Path.rglob and fnmatch compatible extension.
|
|
66
|
+
|
|
67
|
+
Parameters
|
|
68
|
+
----------
|
|
69
|
+
data_type : SupportedData
|
|
70
|
+
Data type.
|
|
71
|
+
|
|
72
|
+
Returns
|
|
73
|
+
-------
|
|
74
|
+
str
|
|
75
|
+
Corresponding extension pattern.
|
|
76
|
+
"""
|
|
77
|
+
if data_type == cls.ARRAY:
|
|
78
|
+
raise NotImplementedError(f"Data '{data_type}' is not loaded from a file.")
|
|
79
|
+
elif data_type == cls.TIFF:
|
|
80
|
+
return "*.tif*"
|
|
81
|
+
elif data_type == cls.CUSTOM:
|
|
82
|
+
return "*.*"
|
|
83
|
+
else:
|
|
84
|
+
raise ValueError(f"Data type {data_type} is not supported.")
|
|
85
|
+
|
|
86
|
+
@classmethod
|
|
87
|
+
def get_extension(cls, data_type: Union[str, SupportedData]) -> str:
|
|
88
|
+
"""
|
|
89
|
+
Get file extension of corresponding data type.
|
|
90
|
+
|
|
91
|
+
Parameters
|
|
92
|
+
----------
|
|
93
|
+
data_type : str or SupportedData
|
|
94
|
+
Data type.
|
|
95
|
+
|
|
96
|
+
Returns
|
|
97
|
+
-------
|
|
98
|
+
str
|
|
99
|
+
Corresponding extension.
|
|
100
|
+
"""
|
|
101
|
+
if data_type == cls.ARRAY:
|
|
102
|
+
raise NotImplementedError(f"Data '{data_type}' is not loaded from a file.")
|
|
103
|
+
elif data_type == cls.TIFF:
|
|
104
|
+
return ".tiff"
|
|
105
|
+
elif data_type == cls.CUSTOM:
|
|
106
|
+
# TODO: improve this message
|
|
107
|
+
raise NotImplementedError("Custom extensions have to be passed elsewhere.")
|
|
108
|
+
else:
|
|
109
|
+
raise ValueError(f"Data type {data_type} is not supported.")
|
|
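A short usage sketch of the behaviour documented above: `_missing_` makes lookups case-insensitive and tolerant of a leading dot, and `get_extension_pattern` plugs directly into Path.rglob. The data folder below is hypothetical and the snippet assumes the package is installed.

from pathlib import Path

from careamics.config.support import SupportedData

# case-insensitive lookup, leading "." stripped by _missing_
assert SupportedData(".TIFF") is SupportedData.TIFF
assert SupportedData("Tiff") is SupportedData.TIFF

# glob all tif/tiff files in a (hypothetical) training folder
pattern = SupportedData.get_extension_pattern(SupportedData.TIFF)  # "*.tif*"
files = sorted(Path("data/train").rglob(pattern))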
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Losses supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# TODO register loss with custom_loss decorator?
|
|
7
|
+
class SupportedLoss(str, BaseEnum):
|
|
8
|
+
"""Supported losses.
|
|
9
|
+
|
|
10
|
+
Attributes
|
|
11
|
+
----------
|
|
12
|
+
MSE : str
|
|
13
|
+
Mean Squared Error loss.
|
|
14
|
+
MAE : str
|
|
15
|
+
Mean Absolute Error loss.
|
|
16
|
+
N2V : str
|
|
17
|
+
Noise2Void loss.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
MSE = "mse"
|
|
21
|
+
MAE = "mae"
|
|
22
|
+
N2V = "n2v"
|
|
23
|
+
# PN2V = "pn2v"
|
|
24
|
+
# HDN = "hdn"
|
|
25
|
+
MUSPLIT = "musplit"
|
|
26
|
+
DENOISPLIT = "denoisplit"
|
|
27
|
+
DENOISPLIT_MUSPLIT = "denoisplit_musplit"
|
|
28
|
+
# CE = "ce"
|
|
29
|
+
# DICE = "dice"
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""Optimizers and schedulers supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedOptimizer(str, BaseEnum):
|
|
7
|
+
"""Supported optimizers.
|
|
8
|
+
|
|
9
|
+
Attributes
|
|
10
|
+
----------
|
|
11
|
+
Adam : str
|
|
12
|
+
Adam optimizer.
|
|
13
|
+
SGD : str
|
|
14
|
+
Stochastic Gradient Descent optimizer.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
# ASGD = "ASGD"
|
|
18
|
+
# Adadelta = "Adadelta"
|
|
19
|
+
# Adagrad = "Adagrad"
|
|
20
|
+
ADAM = "Adam"
|
|
21
|
+
# AdamW = "AdamW"
|
|
22
|
+
# Adamax = "Adamax"
|
|
23
|
+
# LBFGS = "LBFGS"
|
|
24
|
+
# NAdam = "NAdam"
|
|
25
|
+
# RAdam = "RAdam"
|
|
26
|
+
# RMSprop = "RMSprop"
|
|
27
|
+
# Rprop = "Rprop"
|
|
28
|
+
SGD = "SGD"
|
|
29
|
+
# SparseAdam = "SparseAdam"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class SupportedScheduler(str, BaseEnum):
|
|
33
|
+
"""Supported schedulers.
|
|
34
|
+
|
|
35
|
+
Attributes
|
|
36
|
+
----------
|
|
37
|
+
ReduceLROnPlateau : str
|
|
38
|
+
Reduce learning rate on plateau.
|
|
39
|
+
StepLR : str
|
|
40
|
+
Step learning rate.
|
|
41
|
+
"""
|
|
42
|
+
|
|
43
|
+
# ChainedScheduler = "ChainedScheduler"
|
|
44
|
+
# ConstantLR = "ConstantLR"
|
|
45
|
+
# CosineAnnealingLR = "CosineAnnealingLR"
|
|
46
|
+
# CosineAnnealingWarmRestarts = "CosineAnnealingWarmRestarts"
|
|
47
|
+
# CyclicLR = "CyclicLR"
|
|
48
|
+
# ExponentialLR = "ExponentialLR"
|
|
49
|
+
# LambdaLR = "LambdaLR"
|
|
50
|
+
# LinearLR = "LinearLR"
|
|
51
|
+
# MultiStepLR = "MultiStepLR"
|
|
52
|
+
# MultiplicativeLR = "MultiplicativeLR"
|
|
53
|
+
# OneCycleLR = "OneCycleLR"
|
|
54
|
+
# PolynomialLR = "PolynomialLR"
|
|
55
|
+
REDUCE_LR_ON_PLATEAU = "ReduceLROnPlateau"
|
|
56
|
+
# SequentialLR = "SequentialLR"
|
|
57
|
+
STEP_LR = "StepLR"
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Pixel manipulation methods supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedPixelManipulation(str, BaseEnum):
|
|
7
|
+
"""Supported Noise2Void pixel manipulations.
|
|
8
|
+
|
|
9
|
+
- Uniform: Replace masked pixel value by a (uniformly) randomly selected neighbor
|
|
10
|
+
pixel value.
|
|
11
|
+
- Median: Replace masked pixel value by the mean of the neighborhood.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
UNIFORM = "uniform"
|
|
15
|
+
MEDIAN = "median"
|
|
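As a concrete illustration of the two strategies on a single 3x3 neighborhood; this is an illustrative sketch, not the careamics/transforms/pixel_manipulation.py code, and whether the center pixel itself enters the median is an implementation detail not settled here:

import numpy as np

rng = np.random.default_rng(0)

# 3x3 neighborhood around a masked center pixel
neighborhood = np.array(
    [
        [10, 12, 11],
        [13, 99, 12],  # 99 is the masked (center) pixel
        [11, 12, 10],
    ]
)

# "uniform": replace the center by a randomly chosen neighbor value
neighbors = np.delete(neighborhood.flatten(), 4)  # drop the center value
uniform_replacement = rng.choice(neighbors)

# "median": replace the center by the median of the neighborhood
median_replacement = np.median(neighborhood)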
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""StructN2V axes supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedStructAxis(str, BaseEnum):
|
|
7
|
+
"""Supported structN2V mask axes.
|
|
8
|
+
|
|
9
|
+
Attributes
|
|
10
|
+
----------
|
|
11
|
+
HORIZONTAL : str
|
|
12
|
+
Horizontal axis.
|
|
13
|
+
VERTICAL : str
|
|
14
|
+
Vertical axis.
|
|
15
|
+
NONE : str
|
|
16
|
+
No axis, the mask is not applied.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
HORIZONTAL = "horizontal"
|
|
20
|
+
VERTICAL = "vertical"
|
|
21
|
+
NONE = "none"
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""Transforms supported by CAREamics."""
|
|
2
|
+
|
|
3
|
+
from careamics.utils import BaseEnum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SupportedTransform(str, BaseEnum):
|
|
7
|
+
"""Transforms officially supported by CAREamics."""
|
|
8
|
+
|
|
9
|
+
XY_FLIP = "XYFlip"
|
|
10
|
+
XY_RANDOM_ROTATE90 = "XYRandomRotate90"
|
|
11
|
+
N2V_MANIPULATE = "N2VManipulate"
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""Pydantic model representing the metadata of a prediction tile."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Annotated
|
|
6
|
+
|
|
7
|
+
from annotated_types import Len
|
|
8
|
+
from pydantic import BaseModel, ConfigDict
|
|
9
|
+
|
|
10
|
+
DimTuple = Annotated[tuple, Len(min_length=3, max_length=4)]
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TileInformation(BaseModel):
|
|
14
|
+
"""
|
|
15
|
+
Pydantic model containing tile information.
|
|
16
|
+
|
|
17
|
+
This model is used to represent the information required to stitch back a tile into
|
|
18
|
+
a larger image. It is used throughout the prediction pipeline of CAREamics.
|
|
19
|
+
|
|
20
|
+
Array shape should be C(Z)YX, where Z is an optional dimensions.
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
model_config = ConfigDict(validate_default=True)
|
|
24
|
+
|
|
25
|
+
array_shape: DimTuple # TODO: find a way to add custom error message?
|
|
26
|
+
"""Shape of the original (untiled) array."""
|
|
27
|
+
|
|
28
|
+
last_tile: bool = False
|
|
29
|
+
"""Whether this tile is the last one of the array."""
|
|
30
|
+
|
|
31
|
+
overlap_crop_coords: tuple[tuple[int, ...], ...]
|
|
32
|
+
"""Inner coordinates of the tile where to crop the prediction in order to stitch
|
|
33
|
+
it back into the original image."""
|
|
34
|
+
|
|
35
|
+
stitch_coords: tuple[tuple[int, ...], ...]
|
|
36
|
+
"""Coordinates in the original image where to stitch the cropped tile back."""
|
|
37
|
+
|
|
38
|
+
sample_id: int
|
|
39
|
+
"""Sample ID of the tile."""
|
|
40
|
+
|
|
41
|
+
# TODO: Test that ZYX axes are not singleton ?
|
|
42
|
+
|
|
43
|
+
def __eq__(self, other_tile: object):
|
|
44
|
+
"""Check if two tile information objects are equal.
|
|
45
|
+
|
|
46
|
+
Parameters
|
|
47
|
+
----------
|
|
48
|
+
other_tile : object
|
|
49
|
+
Tile information object to compare with.
|
|
50
|
+
|
|
51
|
+
Returns
|
|
52
|
+
-------
|
|
53
|
+
bool
|
|
54
|
+
Whether the two tile information objects are equal.
|
|
55
|
+
"""
|
|
56
|
+
if not isinstance(other_tile, TileInformation):
|
|
57
|
+
return NotImplemented
|
|
58
|
+
|
|
59
|
+
return (
|
|
60
|
+
self.array_shape == other_tile.array_shape
|
|
61
|
+
and self.last_tile == other_tile.last_tile
|
|
62
|
+
and self.overlap_crop_coords == other_tile.overlap_crop_coords
|
|
63
|
+
and self.stitch_coords == other_tile.stitch_coords
|
|
64
|
+
and self.sample_id == other_tile.sample_id
|
|
65
|
+
)
|
|
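A sketch of how such tile metadata might be populated for a single-channel 2D image cut into overlapping tiles; only the field names and import path come from the diff above, all values are illustrative:

from careamics.config.tile_information import TileInformation

tile_info = TileInformation(
    array_shape=(1, 256, 256),                   # C, Y, X of the full (untiled) image
    last_tile=False,
    overlap_crop_coords=((16, 112), (16, 112)),  # region of the tile prediction to keep
    stitch_coords=((16, 112), (16, 112)),        # where that region lands in the image
    sample_id=0,
)

# the custom __eq__ compares all fields, so a round-trip through model_dump is equal
assert tile_info == TileInformation(**tile_info.model_dump())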
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Training configuration."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pprint import pformat
|
|
6
|
+
from typing import Literal, Optional
|
|
7
|
+
|
|
8
|
+
from pydantic import (
|
|
9
|
+
BaseModel,
|
|
10
|
+
ConfigDict,
|
|
11
|
+
Field,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
from .callback_model import CheckpointModel, EarlyStoppingModel
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class TrainingConfig(BaseModel):
|
|
18
|
+
"""
|
|
19
|
+
Parameters related to the training.
|
|
20
|
+
|
|
21
|
+
Mandatory parameters are:
|
|
22
|
+
- num_epochs: number of epochs, greater than 0.
|
|
23
|
+
- batch_size: batch size, greater than 0.
|
|
24
|
+
- augmentation: whether to use data augmentation or not (True or False).
|
|
25
|
+
|
|
26
|
+
Attributes
|
|
27
|
+
----------
|
|
28
|
+
num_epochs : int
|
|
29
|
+
Number of epochs, greater than 0.
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
# Pydantic class configuration
|
|
33
|
+
model_config = ConfigDict(
|
|
34
|
+
validate_assignment=True,
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
num_epochs: int = Field(default=20, ge=1)
|
|
38
|
+
"""Number of epochs, greater than 0."""
|
|
39
|
+
|
|
40
|
+
logger: Optional[Literal["wandb", "tensorboard"]] = None
|
|
41
|
+
"""Logger to use during training. If None, no logger will be used. Available
|
|
42
|
+
loggers are defined in SupportedLogger."""
|
|
43
|
+
|
|
44
|
+
checkpoint_callback: CheckpointModel = CheckpointModel()
|
|
45
|
+
"""Checkpoint callback configuration, following PyTorch Lightning Checkpoint
|
|
46
|
+
callback."""
|
|
47
|
+
|
|
48
|
+
early_stopping_callback: Optional[EarlyStoppingModel] = Field(
|
|
49
|
+
default=None, validate_default=True
|
|
50
|
+
)
|
|
51
|
+
"""Early stopping callback configuration, following PyTorch Lightning Checkpoint
|
|
52
|
+
callback."""
|
|
53
|
+
|
|
54
|
+
def __str__(self) -> str:
|
|
55
|
+
"""Pretty string reprensenting the configuration.
|
|
56
|
+
|
|
57
|
+
Returns
|
|
58
|
+
-------
|
|
59
|
+
str
|
|
60
|
+
Pretty string.
|
|
61
|
+
"""
|
|
62
|
+
return pformat(self.model_dump())
|
|
63
|
+
|
|
64
|
+
def has_logger(self) -> bool:
|
|
65
|
+
"""Check if the logger is defined.
|
|
66
|
+
|
|
67
|
+
Returns
|
|
68
|
+
-------
|
|
69
|
+
bool
|
|
70
|
+
Whether the logger is defined or not.
|
|
71
|
+
"""
|
|
72
|
+
return self.logger is not None
|
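Finally, a hedged usage sketch for this configuration model; the import path follows the file location above and the field values are illustrative:

from careamics.config.training_model import TrainingConfig

cfg = TrainingConfig(num_epochs=50, logger="tensorboard")

assert cfg.has_logger()
print(cfg)  # __str__ pretty-prints the configuration via pformat(model_dump())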