careamics 0.1.0rc2__py3-none-any.whl → 0.1.0rc4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of careamics might be problematic. Click here for more details.
- careamics/__init__.py +16 -4
- careamics/callbacks/__init__.py +6 -0
- careamics/callbacks/hyperparameters_callback.py +42 -0
- careamics/callbacks/progress_bar_callback.py +57 -0
- careamics/careamist.py +761 -0
- careamics/config/__init__.py +31 -3
- careamics/config/algorithm_model.py +167 -0
- careamics/config/architectures/__init__.py +17 -0
- careamics/config/architectures/architecture_model.py +29 -0
- careamics/config/architectures/custom_model.py +150 -0
- careamics/config/architectures/register_model.py +101 -0
- careamics/config/architectures/unet_model.py +96 -0
- careamics/config/architectures/vae_model.py +39 -0
- careamics/config/callback_model.py +92 -0
- careamics/config/configuration_example.py +89 -0
- careamics/config/configuration_factory.py +597 -0
- careamics/config/configuration_model.py +597 -0
- careamics/config/data_model.py +555 -0
- careamics/config/inference_model.py +283 -0
- careamics/config/noise_models.py +162 -0
- careamics/config/optimizer_models.py +181 -0
- careamics/config/references/__init__.py +45 -0
- careamics/config/references/algorithm_descriptions.py +131 -0
- careamics/config/references/references.py +38 -0
- careamics/config/support/__init__.py +33 -0
- careamics/config/support/supported_activations.py +24 -0
- careamics/config/support/supported_algorithms.py +18 -0
- careamics/config/support/supported_architectures.py +18 -0
- careamics/config/support/supported_data.py +82 -0
- careamics/{dataset/extraction_strategy.py → config/support/supported_extraction_strategies.py} +5 -2
- careamics/config/support/supported_loggers.py +8 -0
- careamics/config/support/supported_losses.py +25 -0
- careamics/config/support/supported_optimizers.py +55 -0
- careamics/config/support/supported_pixel_manipulations.py +15 -0
- careamics/config/support/supported_struct_axis.py +19 -0
- careamics/config/support/supported_transforms.py +23 -0
- careamics/config/tile_information.py +104 -0
- careamics/config/training_model.py +65 -0
- careamics/config/transformations/__init__.py +14 -0
- careamics/config/transformations/n2v_manipulate_model.py +63 -0
- careamics/config/transformations/nd_flip_model.py +32 -0
- careamics/config/transformations/normalize_model.py +31 -0
- careamics/config/transformations/transform_model.py +44 -0
- careamics/config/transformations/xy_random_rotate90_model.py +29 -0
- careamics/config/validators/__init__.py +5 -0
- careamics/config/validators/validator_utils.py +100 -0
- careamics/conftest.py +26 -0
- careamics/dataset/__init__.py +5 -0
- careamics/dataset/dataset_utils/__init__.py +19 -0
- careamics/dataset/dataset_utils/dataset_utils.py +100 -0
- careamics/dataset/dataset_utils/file_utils.py +140 -0
- careamics/dataset/dataset_utils/read_tiff.py +61 -0
- careamics/dataset/dataset_utils/read_utils.py +25 -0
- careamics/dataset/dataset_utils/read_zarr.py +56 -0
- careamics/dataset/in_memory_dataset.py +323 -134
- careamics/dataset/iterable_dataset.py +416 -0
- careamics/dataset/patching/__init__.py +8 -0
- careamics/dataset/patching/patch_transform.py +44 -0
- careamics/dataset/patching/patching.py +212 -0
- careamics/dataset/patching/random_patching.py +190 -0
- careamics/dataset/patching/sequential_patching.py +206 -0
- careamics/dataset/patching/tiled_patching.py +158 -0
- careamics/dataset/patching/validate_patch_dimension.py +60 -0
- careamics/dataset/zarr_dataset.py +149 -0
- careamics/lightning_datamodule.py +743 -0
- careamics/lightning_module.py +292 -0
- careamics/lightning_prediction_datamodule.py +396 -0
- careamics/lightning_prediction_loop.py +116 -0
- careamics/losses/__init__.py +4 -1
- careamics/losses/loss_factory.py +24 -14
- careamics/losses/losses.py +65 -5
- careamics/losses/noise_model_factory.py +40 -0
- careamics/losses/noise_models.py +524 -0
- careamics/model_io/__init__.py +8 -0
- careamics/model_io/bioimage/__init__.py +11 -0
- careamics/model_io/bioimage/_readme_factory.py +120 -0
- careamics/model_io/bioimage/bioimage_utils.py +48 -0
- careamics/model_io/bioimage/model_description.py +318 -0
- careamics/model_io/bmz_io.py +231 -0
- careamics/model_io/model_io_utils.py +80 -0
- careamics/models/__init__.py +4 -1
- careamics/models/activation.py +35 -0
- careamics/models/layers.py +244 -0
- careamics/models/model_factory.py +21 -221
- careamics/models/unet.py +46 -20
- careamics/prediction/__init__.py +1 -3
- careamics/prediction/stitch_prediction.py +73 -0
- careamics/transforms/__init__.py +41 -0
- careamics/transforms/n2v_manipulate.py +113 -0
- careamics/transforms/nd_flip.py +93 -0
- careamics/transforms/normalize.py +109 -0
- careamics/transforms/pixel_manipulation.py +383 -0
- careamics/transforms/struct_mask_parameters.py +18 -0
- careamics/transforms/tta.py +74 -0
- careamics/transforms/xy_random_rotate90.py +95 -0
- careamics/utils/__init__.py +10 -12
- careamics/utils/base_enum.py +32 -0
- careamics/utils/context.py +22 -2
- careamics/utils/metrics.py +0 -46
- careamics/utils/path_utils.py +24 -0
- careamics/utils/ram.py +13 -0
- careamics/utils/receptive_field.py +102 -0
- careamics/utils/running_stats.py +43 -0
- careamics/utils/torch_utils.py +112 -75
- careamics-0.1.0rc4.dist-info/METADATA +122 -0
- careamics-0.1.0rc4.dist-info/RECORD +110 -0
- {careamics-0.1.0rc2.dist-info → careamics-0.1.0rc4.dist-info}/WHEEL +1 -1
- careamics/bioimage/__init__.py +0 -15
- careamics/bioimage/docs/Noise2Void.md +0 -5
- careamics/bioimage/docs/__init__.py +0 -1
- careamics/bioimage/io.py +0 -182
- careamics/bioimage/rdf.py +0 -105
- careamics/config/algorithm.py +0 -231
- careamics/config/config.py +0 -297
- careamics/config/config_filter.py +0 -44
- careamics/config/data.py +0 -194
- careamics/config/torch_optim.py +0 -118
- careamics/config/training.py +0 -534
- careamics/dataset/dataset_utils.py +0 -111
- careamics/dataset/patching.py +0 -492
- careamics/dataset/prepare_dataset.py +0 -175
- careamics/dataset/tiff_dataset.py +0 -212
- careamics/engine.py +0 -1014
- careamics/manipulation/__init__.py +0 -4
- careamics/manipulation/pixel_manipulation.py +0 -158
- careamics/prediction/prediction_utils.py +0 -106
- careamics/utils/ascii_logo.txt +0 -9
- careamics/utils/augment.py +0 -65
- careamics/utils/normalization.py +0 -55
- careamics/utils/validators.py +0 -170
- careamics/utils/wandb.py +0 -121
- careamics-0.1.0rc2.dist-info/METADATA +0 -81
- careamics-0.1.0rc2.dist-info/RECORD +0 -47
- {careamics-0.1.0rc2.dist-info → careamics-0.1.0rc4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"""Checkpoint saving configuration."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from datetime import timedelta
|
|
5
|
+
from typing import Literal, Optional
|
|
6
|
+
|
|
7
|
+
from pydantic import (
|
|
8
|
+
BaseModel,
|
|
9
|
+
ConfigDict,
|
|
10
|
+
Field,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CheckpointModel(BaseModel):
    """Checkpoint saving configuration.

    Field names mirror the parameters of PyTorch Lightning's
    ``ModelCheckpoint`` callback (``monitor``, ``mode``, ``save_top_k``,
    ...) — presumably these values are forwarded to that callback; TODO
    confirm against the callback factory (not visible here).
    """

    # Re-validate fields whenever they are assigned after construction.
    model_config = ConfigDict(
        validate_assignment=True,
    )

    # Quantity to monitor; only "val_loss" is currently supported.
    monitor: Literal["val_loss"] = Field(default="val_loss", validate_default=True)
    # Whether to print checkpointing information.
    verbose: bool = Field(default=False, validate_default=True)
    # If True, save only model weights (no optimizer/trainer state).
    save_weights_only: bool = Field(default=False, validate_default=True)
    # "min" keeps checkpoints with the lowest monitored value, "max" the highest.
    mode: Literal["min", "max"] = Field(default="min", validate_default=True)
    # Whether the metric name is automatically inserted into the filename.
    auto_insert_metric_name: bool = Field(default=False, validate_default=True)
    # Save a checkpoint every N training steps (1-10), or None to disable.
    every_n_train_steps: Optional[int] = Field(
        default=None, ge=1, le=10, validate_default=True
    )
    # Minimum wall-clock time between checkpoints, or None to disable.
    train_time_interval: Optional[timedelta] = Field(
        default=None, validate_default=True
    )
    # Save a checkpoint every N epochs (1-10), or None to disable.
    every_n_epochs: Optional[int] = Field(
        default=None, ge=1, le=10, validate_default=True
    )
    # Whether to additionally save the most recent checkpoint
    # ("link" presumably creates a symlink rather than a copy — TODO confirm).
    save_last: Optional[Literal[True, False, "link"]] = Field(
        default=True, validate_default=True
    )
    # Number of best checkpoints to keep (1-10).
    save_top_k: int = Field(default=3, ge=1, le=10, validate_default=True)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class EarlyStoppingModel(BaseModel):
    """Early stopping configuration.

    The leading fields (``monitor``, ``patience``, ``mode``, ``min_delta``,
    ``check_finite``, ``verbose``) mirror the parameters of PyTorch
    Lightning's ``EarlyStopping`` callback — presumably forwarded to it;
    TODO confirm against the callback factory. The remaining fields
    (``stop_on_nan``, ``restore_best_weights`` and the ``auto_lr_find_*``
    group) have no direct ``EarlyStopping`` counterpart — NOTE(review):
    verify where they are consumed.
    """

    # Re-validate fields whenever they are assigned after construction.
    model_config = ConfigDict(
        validate_assignment=True,
    )

    # Quantity to monitor; only "val_loss" is currently supported.
    monitor: Literal["val_loss"] = Field(default="val_loss", validate_default=True)
    # Number of checks with no improvement before stopping (1-10).
    patience: int = Field(default=3, ge=1, le=10, validate_default=True)
    # Direction of improvement for the monitored quantity.
    mode: Literal["min", "max", "auto"] = Field(default="min", validate_default=True)
    # Minimum change in the monitored quantity to count as an improvement.
    min_delta: float = Field(default=0.0, ge=0.0, le=1.0, validate_default=True)
    # Stop when the monitored metric becomes NaN or infinite.
    check_finite: bool = Field(default=True, validate_default=True)
    # NOTE(review): presumably stops training on NaN loss — no Lightning
    # EarlyStopping equivalent; confirm the consumer.
    stop_on_nan: bool = Field(default=True, validate_default=True)
    # Whether to print early-stopping information.
    verbose: bool = Field(default=False, validate_default=True)
    # Presumably restores best weights after stopping — TODO confirm.
    restore_best_weights: bool = Field(default=True, validate_default=True)
    # --- auto_lr_find_* group: learning-rate finder settings, presumably
    # intended for Lightning's lr_find tuner — TODO confirm consumer. ---
    auto_lr_find: bool = Field(default=False, validate_default=True)
    auto_lr_find_patience: int = Field(default=3, ge=1, le=10, validate_default=True)
    auto_lr_find_mode: Literal["min", "max", "auto"] = Field(
        default="min", validate_default=True
    )
    auto_lr_find_direction: Literal["forward", "backward"] = Field(
        default="backward", validate_default=True
    )
    # Upper bound of the learning-rate search range.
    auto_lr_find_max_lr: float = Field(
        default=10.0, ge=0.0, le=1e6, validate_default=True
    )
    # Lower bound of the learning-rate search range.
    auto_lr_find_min_lr: float = Field(
        default=1e-8, ge=0.0, le=1e6, validate_default=True
    )
    # Number of training steps used by the LR search.
    auto_lr_find_num_training: int = Field(
        default=100, ge=1, le=1e6, validate_default=True
    )
    # Loss divergence threshold aborting the LR search.
    auto_lr_find_divergence_threshold: float = Field(
        default=5.0, ge=0.0, le=1e6, validate_default=True
    )
    auto_lr_find_accumulate_grad_batches: int = Field(
        default=1, ge=1, le=1e6, validate_default=True
    )
    auto_lr_find_stop_divergence: bool = Field(default=True, validate_default=True)
    # NOTE(review): unlike every other field, validate_default is not set
    # here — possibly an oversight; confirm intent.
    auto_lr_find_step_scale: float = Field(default=0.1, ge=0.0, le=10)
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
from .algorithm_model import AlgorithmConfig
|
|
2
|
+
from .architectures import UNetModel
|
|
3
|
+
from .configuration_model import Configuration
|
|
4
|
+
from .data_model import DataConfig
|
|
5
|
+
from .optimizer_models import LrSchedulerModel, OptimizerModel
|
|
6
|
+
from .support import (
|
|
7
|
+
SupportedActivation,
|
|
8
|
+
SupportedAlgorithm,
|
|
9
|
+
SupportedArchitecture,
|
|
10
|
+
SupportedData,
|
|
11
|
+
SupportedLogger,
|
|
12
|
+
SupportedLoss,
|
|
13
|
+
SupportedOptimizer,
|
|
14
|
+
SupportedPixelManipulation,
|
|
15
|
+
SupportedScheduler,
|
|
16
|
+
SupportedTransform,
|
|
17
|
+
)
|
|
18
|
+
from .training_model import TrainingConfig
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def full_configuration_example() -> Configuration:
    """Return a full example `Configuration`.

    The example wires together an N2V algorithm configuration (2-level UNet
    with the n2v2 variant enabled), a 2D array data configuration with the
    standard N2V transform pipeline, and a training configuration logging
    to WandB.

    Note: despite what an earlier docstring said, this returns a
    `Configuration` object, not a dictionary.

    Returns
    -------
    Configuration
        Full configuration example.
    """
    experiment_name = "Full example"

    # Algorithm: Noise2Void loss on a UNet, Adam optimizer and a
    # ReduceLROnPlateau scheduler.
    algorithm_model = AlgorithmConfig(
        algorithm=SupportedAlgorithm.N2V.value,
        loss=SupportedLoss.N2V.value,
        model=UNetModel(
            architecture=SupportedArchitecture.UNET.value,
            in_channels=1,
            num_classes=1,
            depth=2,
            num_channels_init=32,
            final_activation=SupportedActivation.NONE.value,
            n2v2=True,
        ),
        optimizer=OptimizerModel(
            name=SupportedOptimizer.ADAM.value, parameters={"lr": 0.0001}
        ),
        lr_scheduler=LrSchedulerModel(
            name=SupportedScheduler.REDUCE_LR_ON_PLATEAU.value,
        ),
    )

    # Data: 2D (YX) arrays, 256x256 patches, with the usual N2V pipeline:
    # normalize, flips/rotations for augmentation, then pixel masking.
    data_model = DataConfig(
        data_type=SupportedData.ARRAY.value,
        patch_size=(256, 256),
        batch_size=8,
        axes="YX",
        transforms=[
            {
                "name": SupportedTransform.NORMALIZE.value,
            },
            {
                "name": SupportedTransform.NDFLIP.value,
                "is_3D": False,
            },
            {
                "name": SupportedTransform.XY_RANDOM_ROTATE90.value,
                "is_3D": False,
            },
            {
                "name": SupportedTransform.N2V_MANIPULATE.value,
                "roi_size": 11,
                "masked_pixel_percentage": 0.2,
                "strategy": SupportedPixelManipulation.MEDIAN.value,
            },
        ],
        mean=0.485,
        std=0.229,
        dataloader_params={
            "num_workers": 4,
        },
    )

    # Training: 30 epochs, logging to WandB.
    training_model = TrainingConfig(
        num_epochs=30,
        logger=SupportedLogger.WANDB.value,
    )

    return Configuration(
        experiment_name=experiment_name,
        algorithm_config=algorithm_model,
        data_config=data_model,
        training_config=training_model,
    )
|