careamics 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.


Files changed (92)
  1. careamics/careamist.py +55 -61
  2. careamics/cli/conf.py +24 -9
  3. careamics/cli/main.py +8 -8
  4. careamics/cli/utils.py +2 -4
  5. careamics/config/__init__.py +8 -0
  6. careamics/config/algorithms/__init__.py +4 -0
  7. careamics/config/algorithms/hdn_algorithm_model.py +103 -0
  8. careamics/config/algorithms/microsplit_algorithm_model.py +103 -0
  9. careamics/config/algorithms/n2v_algorithm_model.py +1 -2
  10. careamics/config/algorithms/vae_algorithm_model.py +53 -18
  11. careamics/config/architectures/lvae_model.py +12 -8
  12. careamics/config/callback_model.py +15 -11
  13. careamics/config/configuration.py +9 -8
  14. careamics/config/configuration_factories.py +892 -78
  15. careamics/config/data/data_model.py +7 -14
  16. careamics/config/data/ng_data_model.py +8 -15
  17. careamics/config/data/patching_strategies/_overlapping_patched_model.py +4 -5
  18. careamics/config/inference_model.py +6 -11
  19. careamics/config/likelihood_model.py +4 -4
  20. careamics/config/loss_model.py +6 -2
  21. careamics/config/nm_model.py +30 -7
  22. careamics/config/optimizer_models.py +1 -2
  23. careamics/config/support/supported_algorithms.py +5 -3
  24. careamics/config/support/supported_losses.py +5 -2
  25. careamics/config/training_model.py +8 -38
  26. careamics/config/transformations/normalize_model.py +3 -4
  27. careamics/config/transformations/xy_flip_model.py +2 -2
  28. careamics/config/transformations/xy_random_rotate90_model.py +2 -2
  29. careamics/config/validators/validator_utils.py +1 -2
  30. careamics/dataset/dataset_utils/iterate_over_files.py +3 -3
  31. careamics/dataset/in_memory_dataset.py +2 -2
  32. careamics/dataset/iterable_dataset.py +1 -2
  33. careamics/dataset/patching/random_patching.py +6 -6
  34. careamics/dataset/patching/sequential_patching.py +4 -4
  35. careamics/dataset/tiling/lvae_tiled_patching.py +2 -2
  36. careamics/dataset_ng/dataset.py +3 -3
  37. careamics/dataset_ng/factory.py +19 -19
  38. careamics/dataset_ng/patch_extractor/demo_custom_image_stack_loader.py +4 -4
  39. careamics/dataset_ng/patch_extractor/image_stack/in_memory_image_stack.py +1 -2
  40. careamics/dataset_ng/patch_extractor/image_stack/zarr_image_stack.py +33 -7
  41. careamics/dataset_ng/patch_extractor/image_stack_loader.py +2 -2
  42. careamics/dataset_ng/patching_strategies/random_patching.py +2 -3
  43. careamics/dataset_ng/patching_strategies/sequential_patching.py +1 -2
  44. careamics/file_io/read/__init__.py +0 -1
  45. careamics/lightning/__init__.py +16 -2
  46. careamics/lightning/callbacks/__init__.py +2 -0
  47. careamics/lightning/callbacks/data_stats_callback.py +23 -0
  48. careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +5 -5
  49. careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +5 -5
  50. careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +8 -8
  51. careamics/lightning/dataset_ng/data_module.py +43 -43
  52. careamics/lightning/lightning_module.py +166 -68
  53. careamics/lightning/microsplit_data_module.py +631 -0
  54. careamics/lightning/predict_data_module.py +16 -9
  55. careamics/lightning/train_data_module.py +29 -18
  56. careamics/losses/__init__.py +7 -1
  57. careamics/losses/loss_factory.py +9 -1
  58. careamics/losses/lvae/losses.py +94 -9
  59. careamics/lvae_training/dataset/__init__.py +8 -8
  60. careamics/lvae_training/dataset/config.py +56 -44
  61. careamics/lvae_training/dataset/lc_dataset.py +18 -12
  62. careamics/lvae_training/dataset/ms_dataset_ref.py +5 -5
  63. careamics/lvae_training/dataset/multich_dataset.py +24 -18
  64. careamics/lvae_training/dataset/multifile_dataset.py +6 -6
  65. careamics/model_io/bioimage/model_description.py +12 -11
  66. careamics/model_io/bmz_io.py +12 -8
  67. careamics/models/layers.py +5 -5
  68. careamics/models/lvae/likelihoods.py +30 -14
  69. careamics/models/lvae/lvae.py +2 -2
  70. careamics/models/lvae/noise_models.py +20 -14
  71. careamics/prediction_utils/__init__.py +8 -2
  72. careamics/prediction_utils/lvae_prediction.py +5 -5
  73. careamics/prediction_utils/prediction_outputs.py +48 -3
  74. careamics/prediction_utils/stitch_prediction.py +71 -0
  75. careamics/transforms/compose.py +9 -9
  76. careamics/transforms/n2v_manipulate.py +3 -3
  77. careamics/transforms/n2v_manipulate_torch.py +4 -4
  78. careamics/transforms/normalize.py +4 -6
  79. careamics/transforms/pixel_manipulation.py +6 -8
  80. careamics/transforms/pixel_manipulation_torch.py +5 -7
  81. careamics/transforms/xy_flip.py +3 -5
  82. careamics/transforms/xy_random_rotate90.py +4 -6
  83. careamics/utils/logging.py +8 -8
  84. careamics/utils/metrics.py +2 -2
  85. careamics/utils/plotting.py +1 -3
  86. {careamics-0.0.14.dist-info → careamics-0.0.16.dist-info}/METADATA +18 -16
  87. {careamics-0.0.14.dist-info → careamics-0.0.16.dist-info}/RECORD +90 -88
  88. careamics/dataset/zarr_dataset.py +0 -151
  89. careamics/file_io/read/zarr.py +0 -60
  90. {careamics-0.0.14.dist-info → careamics-0.0.16.dist-info}/WHEEL +0 -0
  91. {careamics-0.0.14.dist-info → careamics-0.0.16.dist-info}/entry_points.txt +0 -0
  92. {careamics-0.0.14.dist-info → careamics-0.0.16.dist-info}/licenses/LICENSE +0 -0
careamics/config/algorithms/microsplit_algorithm_model.py (new file)
@@ -0,0 +1,103 @@
+ """MicroSplit algorithm configuration."""
+
+ from typing import Literal
+
+ from bioimageio.spec.generic.v0_3 import CiteEntry
+ from pydantic import ConfigDict
+
+ from careamics.config.algorithms.vae_algorithm_model import VAEBasedAlgorithm
+ from careamics.config.architectures import LVAEModel
+ from careamics.config.loss_model import LVAELossConfig
+
+ MICROSPLIT = "MicroSplit"
+
+ MICROSPLIT_DESCRIPTION = """MicroSplit is a self-supervised deep learning method for
+ microscopy image splitting that combines the strengths of both denoising and
+ representation learning approaches."""
+
+ MICROSPLIT_REF = CiteEntry(
+     text='Prakash, M., Delbracio, M., Milanfar, P., Jug, F. 2022. "Interpretable '
+     'Unsupervised Diversity Denoising and Artefact Removal." The International '
+     "Conference on Learning Representations (ICLR).",
+     doi="10.1561/2200000056",
+ )
+
+
+ class MicroSplitAlgorithm(VAEBasedAlgorithm):
+     """MicroSplit algorithm configuration."""
+
+     model_config = ConfigDict(validate_assignment=True)
+
+     algorithm: Literal["microsplit"] = "microsplit"
+
+     loss: LVAELossConfig
+
+     model: LVAEModel  # TODO add validators
+
+     is_supervised: bool = True
+
+     def get_algorithm_friendly_name(self) -> str:
+         """
+         Get the algorithm friendly name.
+
+         Returns
+         -------
+         str
+             Friendly name of the algorithm.
+         """
+         return MICROSPLIT
+
+     def get_algorithm_keywords(self) -> list[str]:
+         """
+         Get algorithm keywords.
+
+         Returns
+         -------
+         list[str]
+             List of keywords.
+         """
+         return [
+             "restoration",
+             "VAE",
+             "self-supervised",
+             "3D" if self.model.is_3D() else "2D",
+             "CAREamics",
+             "pytorch",
+         ]
+
+     def get_algorithm_references(self) -> str:
+         """
+         Get the algorithm references.
+
+         This is used to generate the README of the BioImage Model Zoo export.
+
+         Returns
+         -------
+         str
+             Algorithm references.
+         """
+         return MICROSPLIT_REF.text + " doi: " + MICROSPLIT_REF.doi
+
+     def get_algorithm_citations(self) -> list[CiteEntry]:
+         """
+         Return a list of citation entries of the current algorithm.
+
+         This is used to generate the model description for the BioImage Model Zoo.
+
+         Returns
+         -------
+         List[CiteEntry]
+             List of citation entries.
+         """
+         return [MICROSPLIT_REF]
+
+     def get_algorithm_description(self) -> str:
+         """
+         Get the algorithm description.
+
+         Returns
+         -------
+         str
+             Algorithm description.
+         """
+         return MICROSPLIT_DESCRIPTION
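A usage sketch for the new config (not taken from this diff): the `loss_type` value and the `LVAEModel` arguments below are assumptions for illustration; only the field names and the metadata accessors come from the module above.

```python
# Hedged sketch: constructing the new MicroSplit config.
from careamics.config.algorithms.microsplit_algorithm_model import MicroSplitAlgorithm
from careamics.config.architectures import LVAEModel
from careamics.config.loss_model import LVAELossConfig

algo = MicroSplitAlgorithm(
    loss=LVAELossConfig(loss_type="musplit"),  # assumed loss_type value
    model=LVAEModel(architecture="LVAE", input_shape=(64, 64)),  # assumed args
)
print(algo.get_algorithm_friendly_name())  # "MicroSplit"
print(algo.get_algorithm_keywords())       # [..., "2D", "CAREamics", "pytorch"]
```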
careamics/config/algorithms/n2v_algorithm_model.py
@@ -1,10 +1,9 @@
  """N2V Algorithm configuration."""
 
- from typing import Annotated, Literal
+ from typing import Annotated, Literal, Self
 
  from bioimageio.spec.generic.v0_3 import CiteEntry
  from pydantic import AfterValidator, ConfigDict, model_validator
- from typing_extensions import Self
 
  from careamics.config.architectures import UNetModel
  from careamics.config.support import SupportedPixelManipulation, SupportedStructAxis
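Note the import change above, repeated across several files in this release: `Self` now comes from `typing`, which only provides it on Python 3.11+ (`typing_extensions` is the backport). Where older interpreters must still be supported, the usual pattern is a version-guarded import (a general sketch, not CAREamics code):

```python
import sys

# typing.Self was added in Python 3.11; fall back to the backport before that.
if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self
```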
careamics/config/algorithms/vae_algorithm_model.py
@@ -3,10 +3,9 @@
  from __future__ import annotations
 
  from pprint import pformat
- from typing import Literal, Optional
+ from typing import Literal, Self
 
  from pydantic import BaseModel, ConfigDict, model_validator
- from typing_extensions import Self
 
  from careamics.config.architectures import LVAEModel
  from careamics.config.likelihood_model import (
@@ -40,14 +39,17 @@ class VAEBasedAlgorithm(BaseModel):
      # defined in SupportedAlgorithm
      # TODO: Use supported Enum classes for typing?
      # - values can still be passed as strings and they will be cast to Enum
-     algorithm: Literal["musplit", "denoisplit"]
+     algorithm: Literal["hdn", "microsplit"]
 
      # NOTE: these are all configs (pydantic models)
      loss: LVAELossConfig
      model: LVAEModel
-     noise_model: Optional[MultiChannelNMConfig] = None
-     noise_model_likelihood: Optional[NMLikelihoodConfig] = None
-     gaussian_likelihood: Optional[GaussianLikelihoodConfig] = None
+     noise_model: MultiChannelNMConfig | None = None
+     noise_model_likelihood: NMLikelihoodConfig | None = None
+     gaussian_likelihood: GaussianLikelihoodConfig | None = None  # TODO change to str
+
+     mmse_count: int = 1
+     is_supervised: bool = False
 
      # Optional fields
      optimizer: OptimizerModel = OptimizerModel()
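The `Optional[X]` → `X | None` rewrites here (and in the callback models further down) are behavior-preserving: both spellings denote the same type on Python 3.10+, as this quick check illustrates:

```python
from typing import Optional

# PEP 604 unions compare equal to their typing.Optional equivalents.
assert (int | None) == Optional[int]
```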
@@ -64,22 +66,26 @@ class VAEBasedAlgorithm(BaseModel):
          Self
              The validated model.
          """
-         # musplit
-         if self.algorithm == SupportedAlgorithm.MUSPLIT:
-             if self.loss.loss_type != SupportedLoss.MUSPLIT:
+         # hdn
+         # TODO move to designated configurations
+         if self.algorithm == SupportedAlgorithm.HDN:
+             if self.loss.loss_type != SupportedLoss.HDN:
                  raise ValueError(
-                     f"Algorithm {self.algorithm} only supports loss `musplit`."
+                     f"Algorithm {self.algorithm} only supports loss `hdn`."
                  )
-
-         if self.algorithm == SupportedAlgorithm.DENOISPLIT:
+             if self.model.multiscale_count > 1:
+                 raise ValueError("Algorithm `hdn` does not support multiscale models.")
+         # musplit
+         if self.algorithm == SupportedAlgorithm.MICROSPLIT:
              if self.loss.loss_type not in [
+                 SupportedLoss.MUSPLIT,
                  SupportedLoss.DENOISPLIT,
                  SupportedLoss.DENOISPLIT_MUSPLIT,
-             ]:
+             ]:  # TODO Update losses configs, make loss just microsplit
                  raise ValueError(
-                     f"Algorithm {self.algorithm} only supports loss `denoisplit` "
-                     "or `denoisplit_musplit."
-                 )
+                     f"Algorithm {self.algorithm} only supports loss `microsplit`."
+                 )  # TODO Update losses configs
+
          if (
              self.loss.loss_type == SupportedLoss.DENOISPLIT
              and self.model.predict_logvar is not None
@@ -88,8 +94,10 @@ class VAEBasedAlgorithm(BaseModel):
                  "Algorithm `denoisplit` with loss `denoisplit` only supports "
                  "`predict_logvar` as `None`."
              )
-
-         if self.noise_model is None:
+         if (
+             self.loss.loss_type == SupportedLoss.DENOISPLIT
+             and self.noise_model is None
+         ):
              raise ValueError("Algorithm `denoisplit` requires a noise model.")
          # TODO: what if algorithm is not musplit or denoisplit
          return self
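Net effect of the rewritten validator: an `hdn` config must use the `hdn` loss, and a noise model is now required only when the loss actually is `denoisplit`. A hedged construction sketch (field values are assumptions):

```python
from pydantic import ValidationError

from careamics.config.algorithms.vae_algorithm_model import VAEBasedAlgorithm
from careamics.config.architectures import LVAEModel
from careamics.config.loss_model import LVAELossConfig

try:
    VAEBasedAlgorithm(
        algorithm="hdn",
        loss=LVAELossConfig(loss_type="musplit"),  # wrong loss for `hdn`
        model=LVAEModel(architecture="LVAE"),
    )
except ValidationError as err:
    print(err)  # "... only supports loss `hdn`."
```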
@@ -108,6 +116,12 @@ class VAEBasedAlgorithm(BaseModel):
                  f"Number of output channels ({self.model.output_channels}) must match "
                  f"the number of noise models ({len(self.noise_model.noise_models)})."
              )
+
+         if self.algorithm == SupportedAlgorithm.HDN:
+             assert self.model.output_channels == 1, (
+                 f"Number of output channels ({self.model.output_channels}) must be 1 "
+                 "for algorithm `hdn`."
+             )
          return self
 
      @model_validator(mode="after")
@@ -127,6 +141,16 @@ class VAEBasedAlgorithm(BaseModel):
                      "Gaussian likelihood model `predict_logvar` "
                      f"({self.gaussian_likelihood.predict_logvar}).",
                  )
+         # if self.algorithm == SupportedAlgorithm.HDN:
+         #     assert (
+         #         self.model.predict_logvar is None
+         #     ), "Model `predict_logvar` must be `None` for algorithm `hdn`."
+         #     if self.gaussian_likelihood is not None:
+         #         assert self.gaussian_likelihood.predict_logvar is None, (
+         #             "Gaussian likelihood model `predict_logvar` must be `None` "
+         #             "for algorithm `hdn`."
+         #         )
+         # TODO check this
          return self
 
      def __str__(self) -> str:
@@ -138,3 +162,14 @@ class VAEBasedAlgorithm(BaseModel):
              Pretty string.
          """
          return pformat(self.model_dump())
+
+     @classmethod
+     def get_compatible_algorithms(cls) -> list[str]:
+         """Get the list of compatible algorithms.
+
+         Returns
+         -------
+         list of str
+             List of compatible algorithms.
+         """
+         return ["hdn", "microsplit"]
careamics/config/architectures/lvae_model.py
@@ -1,9 +1,8 @@
  """LVAE Pydantic model."""
 
- from typing import Literal
+ from typing import Literal, Self
 
  from pydantic import ConfigDict, Field, field_validator, model_validator
- from typing_extensions import Self
 
  from .architecture_model import ArchitectureModel
 
@@ -15,11 +14,9 @@ class LVAEModel(ArchitectureModel):
      model_config = ConfigDict(validate_assignment=True, validate_default=True)
 
      architecture: Literal["LVAE"]
-     """Name of the architecture."""
-
-     input_shape: list[int] = Field(default=[64, 64], validate_default=True)
-     """Shape of the input patch (C, Z, Y, X) or (C, Y, X) if the data is 2D."""
 
+     input_shape: tuple[int, ...] = Field(default=(64, 64), validate_default=True)
+     """Shape of the input patch (Z, Y, X) or (Y, X) if the data is 2D."""
      encoder_conv_strides: list = Field(default=[2, 2], validate_default=True)
 
      # TODO make this per hierarchy step ?
@@ -42,7 +39,7 @@ class LVAEModel(ArchitectureModel):
          default="ELU",
      )
 
-     predict_logvar: Literal[None, "pixelwise"] = None
+     predict_logvar: Literal[None, "pixelwise"] = "pixelwise"
      analytical_kl: bool = Field(default=False)
 
      @model_validator(mode="after")
@@ -126,6 +123,13 @@ class LVAEModel(ArchitectureModel):
                  f"Input shape must be greater than 1 in all dimensions"
                  f"(got {input_shape})."
              )
+
+         if any(s < 64 for s in input_shape[-2:]):
+             raise ValueError(
+                 f"Input shape must be greater or equal to 64 in XY dimensions"
+                 f"(got {input_shape})."
+             )
+
          return input_shape
 
      @field_validator("encoder_n_filters")
@@ -255,4 +259,4 @@ class LVAEModel(ArchitectureModel):
          bool
              Whether the model is 3D or not.
          """
-         return self.conv_dims == 3
+         return len(self.input_shape) == 3
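Two behavioral changes stand out: XY patch dimensions below 64 are now rejected, and 3D-ness is derived from the length of `input_shape` rather than `conv_dims`. A sketch, assuming all other fields can be left at their defaults:

```python
from pydantic import ValidationError

from careamics.config.architectures import LVAEModel

assert LVAEModel(architecture="LVAE", input_shape=(8, 64, 64)).is_3D()  # 3 dims
try:
    LVAEModel(architecture="LVAE", input_shape=(32, 32))  # XY below 64
except ValidationError as err:
    print(err)  # "Input shape must be greater or equal to 64 in XY dimensions..."
```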
careamics/config/callback_model.py
@@ -3,7 +3,7 @@
  from __future__ import annotations
 
  from datetime import timedelta
- from typing import Literal, Optional
+ from typing import Literal
 
  from pydantic import (
      BaseModel,
@@ -24,7 +24,7 @@ class CheckpointModel(BaseModel):
 
      model_config = ConfigDict(validate_assignment=True, validate_default=True)
 
-     monitor: Literal["val_loss"] = Field(default="val_loss")
+     monitor: Literal["val_loss"] | str | None = Field(default="val_loss")
      """Quantity to monitor, currently only `val_loss`."""
 
      verbose: bool = Field(default=False)
@@ -33,11 +33,15 @@ class CheckpointModel(BaseModel):
      save_weights_only: bool = Field(default=False)
      """When `True`, only the model's weights will be saved (model.save_weights)."""
 
-     save_last: Optional[Literal[True, False, "link"]] = Field(default=True)
+     save_last: Literal[True, False, "link"] | None = Field(default=True)
      """When `True`, saves a last.ckpt copy whenever a checkpoint file gets saved."""
 
-     save_top_k: int = Field(default=3, ge=-1, le=100)
-     """If `save_top_k == kz, the best k models according to the quantity monitored
+     save_top_k: int = Field(
+         default=3,
+         ge=-1,
+         le=100,
+     )
+     """If `save_top_k == k, the best k models according to the quantity monitored
      will be saved. If `save_top_k == 0`, no models are saved. if `save_top_k == -1`,
      all models are saved."""
 
@@ -51,13 +55,13 @@ class CheckpointModel(BaseModel):
      auto_insert_metric_name: bool = Field(default=False)
      """When `True`, the checkpoints filenames will contain the metric name."""
 
-     every_n_train_steps: Optional[int] = Field(default=None, ge=1, le=1000)
+     every_n_train_steps: int | None = Field(default=None, ge=1, le=1000)
      """Number of training steps between checkpoints."""
 
-     train_time_interval: Optional[timedelta] = Field(default=None)
+     train_time_interval: timedelta | None = Field(default=None)
      """Checkpoints are monitored at the specified time interval."""
 
-     every_n_epochs: Optional[int] = Field(default=None, ge=1, le=100)
+     every_n_epochs: int | None = Field(default=None, ge=1, le=100)
      """Number of epochs between checkpoints."""
 
 
@@ -96,14 +100,14 @@ class EarlyStoppingModel(BaseModel):
      """When `True`, stops training when the monitored quantity becomes `NaN` or
      `inf`."""
 
-     stopping_threshold: Optional[float] = Field(default=None)
+     stopping_threshold: float | None = Field(default=None)
      """Stop training immediately once the monitored quantity reaches this threshold."""
 
-     divergence_threshold: Optional[float] = Field(default=None)
+     divergence_threshold: float | None = Field(default=None)
      """Stop training as soon as the monitored quantity becomes worse than this
      threshold."""
 
-     check_on_train_epoch_end: Optional[bool] = Field(default=False)
+     check_on_train_epoch_end: bool | None = Field(default=False)
      """Whether to run early stopping at the end of the training epoch. If this is
      `False`, then the check runs at the end of the validation."""
 
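A small usage sketch of the loosened checkpoint fields (illustrative values): time-based checkpointing can be configured directly, with the step- and epoch-based intervals left unset:

```python
from datetime import timedelta

from careamics.config.callback_model import CheckpointModel

ckpt = CheckpointModel(train_time_interval=timedelta(minutes=30))
assert ckpt.every_n_train_steps is None and ckpt.every_n_epochs is None
```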
careamics/config/configuration.py
@@ -5,24 +5,28 @@ from __future__ import annotations
  import re
  from collections.abc import Callable
  from pprint import pformat
- from typing import Any, Literal, Union
+ from typing import Any, Literal, Self, Union
 
  import numpy as np
  from bioimageio.spec.generic.v0_3 import CiteEntry
  from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
  from pydantic.main import IncEx
- from typing_extensions import Self
 
  from careamics.config.algorithms import (
      CAREAlgorithm,
+     HDNAlgorithm,
+     MicroSplitAlgorithm,
      N2NAlgorithm,
      N2VAlgorithm,
  )
  from careamics.config.data import DataConfig
  from careamics.config.training_model import TrainingConfig
+ from careamics.lvae_training.dataset.config import MicroSplitDataConfig
 
  ALGORITHMS = Union[
      CAREAlgorithm,
+     HDNAlgorithm,
+     MicroSplitAlgorithm,
      N2NAlgorithm,
      N2VAlgorithm,
  ]
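Because each member of the union carries a distinct `algorithm: Literal[...]` tag, pydantic can route a plain dict to the right class. A self-contained sketch of the mechanism with toy classes (not the CAREamics ones):

```python
from typing import Literal, Union

from pydantic import BaseModel, TypeAdapter

class HDN(BaseModel):
    algorithm: Literal["hdn"] = "hdn"

class MicroSplit(BaseModel):
    algorithm: Literal["microsplit"] = "microsplit"

adapter = TypeAdapter(Union[HDN, MicroSplit])
cfg = adapter.validate_python({"algorithm": "microsplit"})
assert isinstance(cfg, MicroSplit)  # the Literal tag selects the class
```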
@@ -86,7 +90,6 @@ class Configuration(BaseModel):
      ...     axes="YX",
      ...     patch_size=[64, 64],
      ...     batch_size=32,
-     ...     num_epochs=100
      ... )
 
      The configuration can be exported to a dictionary using the model_dump method:
@@ -110,9 +113,7 @@ class Configuration(BaseModel):
      ...             "architecture": "UNet",
      ...         },
      ...     },
-     ...     "training_config": {
-     ...         "num_epochs": 200,
-     ...     },
+     ...     "training_config": {},
      ...     "data_config": {
      ...         "data_type": "tiff",
      ...         "patch_size": [64, 64],
@@ -140,7 +141,7 @@ class Configuration(BaseModel):
      """Algorithm configuration, holding all parameters required to configure the
      model."""
 
-     data_config: DataConfig
+     data_config: DataConfig | MicroSplitDataConfig
      """Data configuration, holding all parameters required to configure the training
      data loader."""
 
@@ -185,7 +186,7 @@ class Configuration(BaseModel):
 
          return name
 
-     @model_validator(mode="after")
+     @model_validator(mode="after")  # TODO move to n2v configs or remove
      def validate_n2v_mask_pixel_perc(self: Self) -> Self:
          """
          Validate that there will always be at least one blind-spot pixel in every patch.
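For intuition on what this validator enforces (the arithmetic is illustrative; the validator's body is not shown in this hunk): the masked-pixel percentage must be large enough that every patch contains at least one blind-spot pixel.

```python
patch_pixels = 64 * 64         # pixels in a 64x64 patch
masked_percentage = 0.2        # a typical N2V masking percentage
masked = patch_pixels * masked_percentage / 100
assert masked >= 1             # ~8 blind-spot pixels per patch
```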