careamics 0.1.0rc2__py3-none-any.whl → 0.1.0rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of careamics might be problematic.

Files changed (134)
  1. careamics/__init__.py +16 -4
  2. careamics/callbacks/__init__.py +6 -0
  3. careamics/callbacks/hyperparameters_callback.py +42 -0
  4. careamics/callbacks/progress_bar_callback.py +57 -0
  5. careamics/careamist.py +761 -0
  6. careamics/config/__init__.py +31 -3
  7. careamics/config/algorithm_model.py +167 -0
  8. careamics/config/architectures/__init__.py +17 -0
  9. careamics/config/architectures/architecture_model.py +29 -0
  10. careamics/config/architectures/custom_model.py +150 -0
  11. careamics/config/architectures/register_model.py +101 -0
  12. careamics/config/architectures/unet_model.py +96 -0
  13. careamics/config/architectures/vae_model.py +39 -0
  14. careamics/config/callback_model.py +92 -0
  15. careamics/config/configuration_example.py +89 -0
  16. careamics/config/configuration_factory.py +597 -0
  17. careamics/config/configuration_model.py +597 -0
  18. careamics/config/data_model.py +555 -0
  19. careamics/config/inference_model.py +283 -0
  20. careamics/config/noise_models.py +162 -0
  21. careamics/config/optimizer_models.py +181 -0
  22. careamics/config/references/__init__.py +45 -0
  23. careamics/config/references/algorithm_descriptions.py +131 -0
  24. careamics/config/references/references.py +38 -0
  25. careamics/config/support/__init__.py +33 -0
  26. careamics/config/support/supported_activations.py +24 -0
  27. careamics/config/support/supported_algorithms.py +18 -0
  28. careamics/config/support/supported_architectures.py +18 -0
  29. careamics/config/support/supported_data.py +82 -0
  30. careamics/{dataset/extraction_strategy.py → config/support/supported_extraction_strategies.py} +5 -2
  31. careamics/config/support/supported_loggers.py +8 -0
  32. careamics/config/support/supported_losses.py +25 -0
  33. careamics/config/support/supported_optimizers.py +55 -0
  34. careamics/config/support/supported_pixel_manipulations.py +15 -0
  35. careamics/config/support/supported_struct_axis.py +19 -0
  36. careamics/config/support/supported_transforms.py +23 -0
  37. careamics/config/tile_information.py +104 -0
  38. careamics/config/training_model.py +65 -0
  39. careamics/config/transformations/__init__.py +14 -0
  40. careamics/config/transformations/n2v_manipulate_model.py +63 -0
  41. careamics/config/transformations/nd_flip_model.py +32 -0
  42. careamics/config/transformations/normalize_model.py +31 -0
  43. careamics/config/transformations/transform_model.py +44 -0
  44. careamics/config/transformations/xy_random_rotate90_model.py +29 -0
  45. careamics/config/validators/__init__.py +5 -0
  46. careamics/config/validators/validator_utils.py +100 -0
  47. careamics/conftest.py +26 -0
  48. careamics/dataset/__init__.py +5 -0
  49. careamics/dataset/dataset_utils/__init__.py +19 -0
  50. careamics/dataset/dataset_utils/dataset_utils.py +100 -0
  51. careamics/dataset/dataset_utils/file_utils.py +140 -0
  52. careamics/dataset/dataset_utils/read_tiff.py +61 -0
  53. careamics/dataset/dataset_utils/read_utils.py +25 -0
  54. careamics/dataset/dataset_utils/read_zarr.py +56 -0
  55. careamics/dataset/in_memory_dataset.py +323 -134
  56. careamics/dataset/iterable_dataset.py +416 -0
  57. careamics/dataset/patching/__init__.py +8 -0
  58. careamics/dataset/patching/patch_transform.py +44 -0
  59. careamics/dataset/patching/patching.py +212 -0
  60. careamics/dataset/patching/random_patching.py +190 -0
  61. careamics/dataset/patching/sequential_patching.py +206 -0
  62. careamics/dataset/patching/tiled_patching.py +158 -0
  63. careamics/dataset/patching/validate_patch_dimension.py +60 -0
  64. careamics/dataset/zarr_dataset.py +149 -0
  65. careamics/lightning_datamodule.py +743 -0
  66. careamics/lightning_module.py +292 -0
  67. careamics/lightning_prediction_datamodule.py +396 -0
  68. careamics/lightning_prediction_loop.py +116 -0
  69. careamics/losses/__init__.py +4 -1
  70. careamics/losses/loss_factory.py +24 -14
  71. careamics/losses/losses.py +65 -5
  72. careamics/losses/noise_model_factory.py +40 -0
  73. careamics/losses/noise_models.py +524 -0
  74. careamics/model_io/__init__.py +8 -0
  75. careamics/model_io/bioimage/__init__.py +11 -0
  76. careamics/model_io/bioimage/_readme_factory.py +120 -0
  77. careamics/model_io/bioimage/bioimage_utils.py +48 -0
  78. careamics/model_io/bioimage/model_description.py +318 -0
  79. careamics/model_io/bmz_io.py +231 -0
  80. careamics/model_io/model_io_utils.py +80 -0
  81. careamics/models/__init__.py +4 -1
  82. careamics/models/activation.py +35 -0
  83. careamics/models/layers.py +244 -0
  84. careamics/models/model_factory.py +21 -221
  85. careamics/models/unet.py +46 -20
  86. careamics/prediction/__init__.py +1 -3
  87. careamics/prediction/stitch_prediction.py +73 -0
  88. careamics/transforms/__init__.py +41 -0
  89. careamics/transforms/n2v_manipulate.py +113 -0
  90. careamics/transforms/nd_flip.py +93 -0
  91. careamics/transforms/normalize.py +109 -0
  92. careamics/transforms/pixel_manipulation.py +383 -0
  93. careamics/transforms/struct_mask_parameters.py +18 -0
  94. careamics/transforms/tta.py +74 -0
  95. careamics/transforms/xy_random_rotate90.py +95 -0
  96. careamics/utils/__init__.py +10 -12
  97. careamics/utils/base_enum.py +32 -0
  98. careamics/utils/context.py +22 -2
  99. careamics/utils/metrics.py +0 -46
  100. careamics/utils/path_utils.py +24 -0
  101. careamics/utils/ram.py +13 -0
  102. careamics/utils/receptive_field.py +102 -0
  103. careamics/utils/running_stats.py +43 -0
  104. careamics/utils/torch_utils.py +112 -75
  105. careamics-0.1.0rc4.dist-info/METADATA +122 -0
  106. careamics-0.1.0rc4.dist-info/RECORD +110 -0
  107. {careamics-0.1.0rc2.dist-info → careamics-0.1.0rc4.dist-info}/WHEEL +1 -1
  108. careamics/bioimage/__init__.py +0 -15
  109. careamics/bioimage/docs/Noise2Void.md +0 -5
  110. careamics/bioimage/docs/__init__.py +0 -1
  111. careamics/bioimage/io.py +0 -182
  112. careamics/bioimage/rdf.py +0 -105
  113. careamics/config/algorithm.py +0 -231
  114. careamics/config/config.py +0 -297
  115. careamics/config/config_filter.py +0 -44
  116. careamics/config/data.py +0 -194
  117. careamics/config/torch_optim.py +0 -118
  118. careamics/config/training.py +0 -534
  119. careamics/dataset/dataset_utils.py +0 -111
  120. careamics/dataset/patching.py +0 -492
  121. careamics/dataset/prepare_dataset.py +0 -175
  122. careamics/dataset/tiff_dataset.py +0 -212
  123. careamics/engine.py +0 -1014
  124. careamics/manipulation/__init__.py +0 -4
  125. careamics/manipulation/pixel_manipulation.py +0 -158
  126. careamics/prediction/prediction_utils.py +0 -106
  127. careamics/utils/ascii_logo.txt +0 -9
  128. careamics/utils/augment.py +0 -65
  129. careamics/utils/normalization.py +0 -55
  130. careamics/utils/validators.py +0 -170
  131. careamics/utils/wandb.py +0 -121
  132. careamics-0.1.0rc2.dist-info/METADATA +0 -81
  133. careamics-0.1.0rc2.dist-info/RECORD +0 -47
  134. {careamics-0.1.0rc2.dist-info → careamics-0.1.0rc4.dist-info}/licenses/LICENSE +0 -0
careamics/config/callback_model.py
@@ -0,0 +1,92 @@
+ """Checkpoint saving configuration."""
+ from __future__ import annotations
+
+ from datetime import timedelta
+ from typing import Literal, Optional
+
+ from pydantic import (
+     BaseModel,
+     ConfigDict,
+     Field,
+ )
+
+
+ class CheckpointModel(BaseModel):
+     """_summary_.
+
+     Parameters
+     ----------
+     BaseModel : _type_
+         _description_
+     """
+
+     model_config = ConfigDict(
+         validate_assignment=True,
+     )
+
+     monitor: Literal["val_loss"] = Field(default="val_loss", validate_default=True)
+     verbose: bool = Field(default=False, validate_default=True)
+     save_weights_only: bool = Field(default=False, validate_default=True)
+     mode: Literal["min", "max"] = Field(default="min", validate_default=True)
+     auto_insert_metric_name: bool = Field(default=False, validate_default=True)
+     every_n_train_steps: Optional[int] = Field(
+         default=None, ge=1, le=10, validate_default=True
+     )
+     train_time_interval: Optional[timedelta] = Field(
+         default=None, validate_default=True
+     )
+     every_n_epochs: Optional[int] = Field(
+         default=None, ge=1, le=10, validate_default=True
+     )
+     save_last: Optional[Literal[True, False, "link"]] = Field(
+         default=True, validate_default=True
+     )
+     save_top_k: int = Field(default=3, ge=1, le=10, validate_default=True)
+
+
+ class EarlyStoppingModel(BaseModel):
+     """_summary_.
+
+     Parameters
+     ----------
+     BaseModel : _type_
+         _description_
+     """
+
+     model_config = ConfigDict(
+         validate_assignment=True,
+     )
+
+     monitor: Literal["val_loss"] = Field(default="val_loss", validate_default=True)
+     patience: int = Field(default=3, ge=1, le=10, validate_default=True)
+     mode: Literal["min", "max", "auto"] = Field(default="min", validate_default=True)
+     min_delta: float = Field(default=0.0, ge=0.0, le=1.0, validate_default=True)
+     check_finite: bool = Field(default=True, validate_default=True)
+     stop_on_nan: bool = Field(default=True, validate_default=True)
+     verbose: bool = Field(default=False, validate_default=True)
+     restore_best_weights: bool = Field(default=True, validate_default=True)
+     auto_lr_find: bool = Field(default=False, validate_default=True)
+     auto_lr_find_patience: int = Field(default=3, ge=1, le=10, validate_default=True)
+     auto_lr_find_mode: Literal["min", "max", "auto"] = Field(
+         default="min", validate_default=True
+     )
+     auto_lr_find_direction: Literal["forward", "backward"] = Field(
+         default="backward", validate_default=True
+     )
+     auto_lr_find_max_lr: float = Field(
+         default=10.0, ge=0.0, le=1e6, validate_default=True
+     )
+     auto_lr_find_min_lr: float = Field(
+         default=1e-8, ge=0.0, le=1e6, validate_default=True
+     )
+     auto_lr_find_num_training: int = Field(
+         default=100, ge=1, le=1e6, validate_default=True
+     )
+     auto_lr_find_divergence_threshold: float = Field(
+         default=5.0, ge=0.0, le=1e6, validate_default=True
+     )
+     auto_lr_find_accumulate_grad_batches: int = Field(
+         default=1, ge=1, le=1e6, validate_default=True
+     )
+     auto_lr_find_stop_divergence: bool = Field(default=True, validate_default=True)
+     auto_lr_find_step_scale: float = Field(default=0.1, ge=0.0, le=10)
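
A minimal usage sketch for the two callback models above. The import path is an assumption based on the careamics/config/callback_model.py entry in the file list, and pydantic v2 is assumed (implied by ConfigDict and model_config):

# Hypothetical usage sketch; import path inferred from the file list above.
from careamics.config.callback_model import CheckpointModel, EarlyStoppingModel

# Defaults: monitor "val_loss", keep the 3 best checkpoints.
checkpoint_cfg = CheckpointModel()

# validate_assignment=True re-validates fields on attribute assignment,
# so out-of-range values raise a pydantic ValidationError.
checkpoint_cfg.save_top_k = 5      # fine: 1 <= save_top_k <= 10
# checkpoint_cfg.save_top_k = 50   # would raise ValidationError

early_stopping_cfg = EarlyStoppingModel(patience=5, min_delta=0.01)

# pydantic v2 serialization of the validated settings.
print(checkpoint_cfg.model_dump())
print(early_stopping_cfg.model_dump())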
careamics/config/configuration_example.py
@@ -0,0 +1,89 @@
+ from .algorithm_model import AlgorithmConfig
+ from .architectures import UNetModel
+ from .configuration_model import Configuration
+ from .data_model import DataConfig
+ from .optimizer_models import LrSchedulerModel, OptimizerModel
+ from .support import (
+     SupportedActivation,
+     SupportedAlgorithm,
+     SupportedArchitecture,
+     SupportedData,
+     SupportedLogger,
+     SupportedLoss,
+     SupportedOptimizer,
+     SupportedPixelManipulation,
+     SupportedScheduler,
+     SupportedTransform,
+ )
+ from .training_model import TrainingConfig
+
+
+ def full_configuration_example() -> Configuration:
+     """Returns a dictionnary representing a full configuration example.
+
+     Returns
+     -------
+     Configuration
+         Full configuration example.
+     """
+     experiment_name = "Full example"
+     algorithm_model = AlgorithmConfig(
+         algorithm=SupportedAlgorithm.N2V.value,
+         loss=SupportedLoss.N2V.value,
+         model=UNetModel(
+             architecture=SupportedArchitecture.UNET.value,
+             in_channels=1,
+             num_classes=1,
+             depth=2,
+             num_channels_init=32,
+             final_activation=SupportedActivation.NONE.value,
+             n2v2=True,
+         ),
+         optimizer=OptimizerModel(
+             name=SupportedOptimizer.ADAM.value, parameters={"lr": 0.0001}
+         ),
+         lr_scheduler=LrSchedulerModel(
+             name=SupportedScheduler.REDUCE_LR_ON_PLATEAU.value,
+         ),
+     )
+     data_model = DataConfig(
+         data_type=SupportedData.ARRAY.value,
+         patch_size=(256, 256),
+         batch_size=8,
+         axes="YX",
+         transforms=[
+             {
+                 "name": SupportedTransform.NORMALIZE.value,
+             },
+             {
+                 "name": SupportedTransform.NDFLIP.value,
+                 "is_3D": False,
+             },
+             {
+                 "name": SupportedTransform.XY_RANDOM_ROTATE90.value,
+                 "is_3D": False,
+             },
+             {
+                 "name": SupportedTransform.N2V_MANIPULATE.value,
+                 "roi_size": 11,
+                 "masked_pixel_percentage": 0.2,
+                 "strategy": SupportedPixelManipulation.MEDIAN.value,
+             },
+         ],
+         mean=0.485,
+         std=0.229,
+         dataloader_params={
+             "num_workers": 4,
+         },
+     )
+     training_model = TrainingConfig(
+         num_epochs=30,
+         logger=SupportedLogger.WANDB.value,
+     )
+
+     return Configuration(
+         experiment_name=experiment_name,
+         algorithm_config=algorithm_model,
+         data_config=data_model,
+         training_config=training_model,
+     )
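
A short sketch of how this example factory could be used. The import path is an assumption based on the careamics/config/configuration_example.py entry in the file list, and Configuration is assumed to be a pydantic v2 model, consistent with the rest of the new config package:

# Hypothetical usage; import path inferred from the file list above.
from careamics.config.configuration_example import full_configuration_example

config = full_configuration_example()

# The returned Configuration bundles the algorithm, data and training
# sections built in the function body above.
print(config.experiment_name)        # "Full example"
print(config.algorithm_config.loss)  # SupportedLoss.N2V.value

# Assuming a pydantic v2 model, the full example can be dumped for
# inspection or used as a starting point for a custom configuration.
print(config.model_dump())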