careamics 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of careamics might be problematic.

Files changed (79)
  1. careamics/careamist.py +11 -14
  2. careamics/cli/conf.py +18 -3
  3. careamics/config/__init__.py +8 -0
  4. careamics/config/algorithms/__init__.py +4 -0
  5. careamics/config/algorithms/hdn_algorithm_model.py +103 -0
  6. careamics/config/algorithms/microsplit_algorithm_model.py +103 -0
  7. careamics/config/algorithms/n2v_algorithm_model.py +1 -2
  8. careamics/config/algorithms/vae_algorithm_model.py +51 -16
  9. careamics/config/architectures/lvae_model.py +12 -8
  10. careamics/config/callback_model.py +7 -3
  11. careamics/config/configuration.py +15 -63
  12. careamics/config/configuration_factories.py +853 -29
  13. careamics/config/data/data_model.py +50 -11
  14. careamics/config/data/ng_data_model.py +168 -4
  15. careamics/config/data/patch_filter/__init__.py +15 -0
  16. careamics/config/data/patch_filter/filter_model.py +16 -0
  17. careamics/config/data/patch_filter/mask_filter_model.py +17 -0
  18. careamics/config/data/patch_filter/max_filter_model.py +15 -0
  19. careamics/config/data/patch_filter/meanstd_filter_model.py +18 -0
  20. careamics/config/data/patch_filter/shannon_filter_model.py +15 -0
  21. careamics/config/inference_model.py +1 -2
  22. careamics/config/likelihood_model.py +2 -2
  23. careamics/config/loss_model.py +6 -2
  24. careamics/config/nm_model.py +26 -1
  25. careamics/config/optimizer_models.py +1 -2
  26. careamics/config/support/supported_algorithms.py +5 -3
  27. careamics/config/support/supported_filters.py +17 -0
  28. careamics/config/support/supported_losses.py +5 -2
  29. careamics/config/training_model.py +6 -36
  30. careamics/config/transformations/normalize_model.py +1 -2
  31. careamics/dataset_ng/dataset.py +57 -5
  32. careamics/dataset_ng/factory.py +101 -18
  33. careamics/dataset_ng/patch_extractor/demo_custom_image_stack_loader.py +4 -4
  34. careamics/dataset_ng/patch_extractor/image_stack/in_memory_image_stack.py +1 -2
  35. careamics/dataset_ng/patch_extractor/image_stack/zarr_image_stack.py +33 -7
  36. careamics/dataset_ng/patch_extractor/image_stack_loader.py +2 -2
  37. careamics/dataset_ng/patch_filter/__init__.py +20 -0
  38. careamics/dataset_ng/patch_filter/coordinate_filter_protocol.py +27 -0
  39. careamics/dataset_ng/patch_filter/filter_factory.py +94 -0
  40. careamics/dataset_ng/patch_filter/mask_filter.py +95 -0
  41. careamics/dataset_ng/patch_filter/max_filter.py +188 -0
  42. careamics/dataset_ng/patch_filter/mean_std_filter.py +218 -0
  43. careamics/dataset_ng/patch_filter/patch_filter_protocol.py +50 -0
  44. careamics/dataset_ng/patch_filter/shannon_filter.py +188 -0
  45. careamics/file_io/read/__init__.py +0 -1
  46. careamics/lightning/__init__.py +16 -2
  47. careamics/lightning/callbacks/__init__.py +2 -0
  48. careamics/lightning/callbacks/data_stats_callback.py +33 -0
  49. careamics/lightning/dataset_ng/data_module.py +79 -2
  50. careamics/lightning/lightning_module.py +162 -61
  51. careamics/lightning/microsplit_data_module.py +636 -0
  52. careamics/lightning/predict_data_module.py +8 -1
  53. careamics/lightning/train_data_module.py +19 -8
  54. careamics/losses/__init__.py +7 -1
  55. careamics/losses/loss_factory.py +9 -1
  56. careamics/losses/lvae/losses.py +85 -0
  57. careamics/lvae_training/dataset/__init__.py +8 -8
  58. careamics/lvae_training/dataset/config.py +56 -44
  59. careamics/lvae_training/dataset/lc_dataset.py +18 -12
  60. careamics/lvae_training/dataset/ms_dataset_ref.py +5 -5
  61. careamics/lvae_training/dataset/multich_dataset.py +24 -18
  62. careamics/lvae_training/dataset/multifile_dataset.py +6 -6
  63. careamics/lvae_training/eval_utils.py +46 -24
  64. careamics/model_io/bmz_io.py +9 -5
  65. careamics/models/lvae/likelihoods.py +31 -14
  66. careamics/models/lvae/lvae.py +2 -2
  67. careamics/models/lvae/noise_models.py +20 -14
  68. careamics/prediction_utils/__init__.py +8 -2
  69. careamics/prediction_utils/prediction_outputs.py +49 -3
  70. careamics/prediction_utils/stitch_prediction.py +83 -1
  71. careamics/transforms/xy_random_rotate90.py +1 -1
  72. careamics/utils/version.py +4 -4
  73. {careamics-0.0.15.dist-info → careamics-0.0.17.dist-info}/METADATA +19 -22
  74. {careamics-0.0.15.dist-info → careamics-0.0.17.dist-info}/RECORD +77 -60
  75. careamics/dataset/zarr_dataset.py +0 -151
  76. careamics/file_io/read/zarr.py +0 -60
  77. {careamics-0.0.15.dist-info → careamics-0.0.17.dist-info}/WHEEL +0 -0
  78. {careamics-0.0.15.dist-info → careamics-0.0.17.dist-info}/entry_points.txt +0 -0
  79. {careamics-0.0.15.dist-info → careamics-0.0.17.dist-info}/licenses/LICENSE +0 -0
careamics/careamist.py CHANGED
@@ -41,6 +41,7 @@ logger = get_logger(__name__)
 LOGGER_TYPES = list[Union[TensorBoardLogger, WandbLogger, CSVLogger]]
 
 
+# TODO type ignore have been added because of the czi data type in data configuration
 class CAREamist:
     """Main CAREamics class, allowing training and prediction using various algorithms.
 
@@ -208,17 +209,11 @@ class CAREamist:
 
         # instantiate trainer
         self.trainer = Trainer(
-            max_epochs=self.cfg.training_config.num_epochs,
-            precision=self.cfg.training_config.precision,
-            max_steps=self.cfg.training_config.max_steps,
-            check_val_every_n_epoch=self.cfg.training_config.check_val_every_n_epoch,
             enable_progress_bar=enable_progress_bar,
-            accumulate_grad_batches=self.cfg.training_config.accumulate_grad_batches,
-            gradient_clip_val=self.cfg.training_config.gradient_clip_val,
-            gradient_clip_algorithm=self.cfg.training_config.gradient_clip_algorithm,
             callbacks=self.callbacks,
             default_root_dir=self.work_dir,
             logger=experiment_logger,
+            **self.cfg.training_config.lightning_trainer_config or {},
         )
 
         # place holder for the datamodules
@@ -264,7 +259,7 @@ class CAREamist:
             HyperParametersCallback(self.cfg),
             ModelCheckpoint(
                 dirpath=self.work_dir / Path("checkpoints"),
-                filename=self.cfg.experiment_name,
+                filename=f"{self.cfg.experiment_name}_{{epoch:02d}}_step_{{step}}",
                 **self.cfg.training_config.checkpoint_callback.model_dump(),
             ),
         ]
@@ -680,7 +675,7 @@ class CAREamist:
         # create the prediction
         self.pred_datamodule = create_predict_datamodule(
             pred_data=source,
-            data_type=data_type or self.cfg.data_config.data_type,
+            data_type=data_type or self.cfg.data_config.data_type,  # type: ignore
             axes=axes or self.cfg.data_config.axes,
             image_means=self.cfg.data_config.image_means,
             image_stds=self.cfg.data_config.image_stds,
@@ -826,11 +821,13 @@ class CAREamist:
         source_data_type: Literal["array", "tiff", "custom"]
         if isinstance(source, PredictDataModule):
             source_path = source.pred_data
-            source_data_type = source.data_type
+            source_data_type = source.data_type  # type: ignore
             extension_filter = source.extension_filter
-        elif isinstance(source, str | Path):
+        elif isinstance(source, (str | Path)):
             source_path = source
-            source_data_type = data_type or self.cfg.data_config.data_type
+            source_data_type = (
+                data_type or self.cfg.data_config.data_type  # type: ignore
+            )
             extension_filter = SupportedData.get_extension_pattern(
                 SupportedData(source_data_type)
             )
@@ -841,7 +838,7 @@ class CAREamist:
             raise ValueError(
                 "Predicting to disk is not supported for input type 'array'."
             )
-        assert isinstance(source_path, str | Path)  # because data_type != "array"
+        assert isinstance(source_path, (Path | str))  # because data_type != "array"
         source_path = Path(source_path)
 
         file_paths = list_files(source_path, source_data_type, extension_filter)
@@ -879,7 +876,7 @@ class CAREamist:
 
     def export_to_bmz(
         self,
-        path_to_archive: Union[Path, str],
+        path_to_archive: Union[Path | str],
         friendly_model_name: str,
         input_array: NDArray,
         authors: list[dict],
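Note: the Trainer arguments that were previously hard-coded (max_epochs, precision, max_steps, gradient clipping, and so on) now travel through the training configuration as a passthrough dict. A minimal sketch of the new mechanism, assuming `lightning_trainer_config` is a plain dict of `pytorch_lightning.Trainer` keyword arguments (the factory call itself is existing careamics API):

    from careamics.config import create_n2v_configuration

    config = create_n2v_configuration(
        experiment_name="n2v_demo",
        data_type="tiff",
        axes="YX",
        patch_size=[64, 64],
        batch_size=8,
        num_epochs=100,
    )
    # assumed: any pytorch_lightning.Trainer kwarg can be forwarded here,
    # since the dict is unpacked directly into Trainer(**...)
    config.training_config.lightning_trainer_config = {
        "max_steps": 10_000,
        "gradient_clip_val": 1.0,
        "accumulate_grad_batches": 2,
    }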
careamics/cli/conf.py CHANGED
@@ -116,7 +116,11 @@ def care(  # numpydoc ignore=PR01
         ),
     ],
     batch_size: Annotated[int, typer.Option(help="Batch size.")],
-    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")],
+    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")] = 100,
+    num_steps: Annotated[
+        int | None,
+        typer.Option(help="Number of batches per epoch (limit_train_batches)."),
+    ] = None,
     data_type: Annotated[
         click.Choice,
         typer.Option(click_type=click.Choice(["tiff"]), help="Type of the data."),
@@ -175,6 +179,7 @@ def care(  # numpydoc ignore=PR01
         patch_size=patch_size,
         batch_size=batch_size,
         num_epochs=num_epochs,
+        num_steps=num_steps,
         # TODO: fix choosing augmentations
         augmentations=None if use_augmentations else [],
         independent_channels=independent_channels,
@@ -203,7 +208,11 @@ def n2n(  # numpydoc ignore=PR01
         ),
     ],
     batch_size: Annotated[int, typer.Option(help="Batch size.")],
-    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")],
+    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")] = 100,
+    num_steps: Annotated[
+        int | None,
+        typer.Option(help="Number of batches per epoch (limit_train_batches)."),
+    ] = None,
     data_type: Annotated[
         click.Choice,
         typer.Option(click_type=click.Choice(["tiff"]), help="Type of the data."),
@@ -259,6 +268,7 @@ def n2n(  # numpydoc ignore=PR01
         patch_size=patch_size,
         batch_size=batch_size,
         num_epochs=num_epochs,
+        num_steps=num_steps,
         # TODO: fix choosing augmentations
         augmentations=None if use_augmentations else [],
         independent_channels=independent_channels,
@@ -287,7 +297,11 @@ def n2v(  # numpydoc ignore=PR01
         ),
     ],
     batch_size: Annotated[int, typer.Option(help="Batch size.")],
-    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")],
+    num_epochs: Annotated[int, typer.Option(help="Number of epochs.")] = 100,
+    num_steps: Annotated[
+        int | None,
+        typer.Option(help="Number of batches per epoch (limit_train_batches)."),
+    ] = None,
     data_type: Annotated[
         click.Choice,
         typer.Option(click_type=click.Choice(["tiff"]), help="Type of the data."),
@@ -364,6 +378,7 @@ def n2v(  # numpydoc ignore=PR01
         patch_size=patch_size,
         batch_size=batch_size,
         num_epochs=num_epochs,
+        num_steps=num_steps,
         # TODO: fix choosing augmentations
         augmentations=None if use_augmentations else [],
         independent_channels=independent_channels,
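All three commands (care, n2n, n2v) gain a --num-steps option, default num_epochs to 100, and forward num_steps to the configuration factories; per the help text it maps to Lightning's limit_train_batches. A hedged sketch of the equivalent factory call, assuming the factory accepts `num_steps` exactly as the CLI forwards it:

    from careamics.config import create_care_configuration

    config = create_care_configuration(
        experiment_name="care_demo",
        data_type="tiff",
        axes="YX",
        patch_size=[64, 64],
        batch_size=8,
        num_epochs=50,
        num_steps=200,  # per the CLI help: batches per epoch (limit_train_batches)
    )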
careamics/config/__init__.py CHANGED
@@ -12,8 +12,10 @@ __all__ = [
     "Configuration",
     "DataConfig",
     "GaussianMixtureNMConfig",
+    "HDNAlgorithm",
     "InferenceConfig",
     "LVAELossConfig",
+    "MicroSplitAlgorithm",
     "MultiChannelNMConfig",
     "N2NAlgorithm",
     "N2VAlgorithm",
@@ -22,6 +24,8 @@ __all__ = [
     "VAEBasedAlgorithm",
     "algorithm_factory",
     "create_care_configuration",
+    "create_hdn_configuration",
+    "create_microsplit_configuration",
     "create_n2n_configuration",
     "create_n2v_configuration",
     "load_configuration",
@@ -30,6 +34,8 @@ __all__ = [
 
 from .algorithms import (
     CAREAlgorithm,
+    HDNAlgorithm,
+    MicroSplitAlgorithm,
     N2NAlgorithm,
     N2VAlgorithm,
     UNetBasedAlgorithm,
@@ -40,6 +46,8 @@ from .configuration import Configuration
 from .configuration_factories import (
     algorithm_factory,
     create_care_configuration,
+    create_hdn_configuration,
+    create_microsplit_configuration,
     create_n2n_configuration,
     create_n2v_configuration,
 )
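The new algorithm configs and factories are re-exported from the package root; this import block follows directly from the updated `__all__`:

    from careamics.config import (
        HDNAlgorithm,
        MicroSplitAlgorithm,
        create_hdn_configuration,
        create_microsplit_configuration,
    )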
careamics/config/algorithms/__init__.py CHANGED
@@ -2,6 +2,8 @@
 
 __all__ = [
     "CAREAlgorithm",
+    "HDNAlgorithm",
+    "MicroSplitAlgorithm",
     "N2NAlgorithm",
     "N2VAlgorithm",
     "UNetBasedAlgorithm",
@@ -9,6 +11,8 @@ __all__ = [
 ]
 
 from .care_algorithm_model import CAREAlgorithm
+from .hdn_algorithm_model import HDNAlgorithm
+from .microsplit_algorithm_model import MicroSplitAlgorithm
 from .n2n_algorithm_model import N2NAlgorithm
 from .n2v_algorithm_model import N2VAlgorithm
 from .unet_algorithm_model import UNetBasedAlgorithm
careamics/config/algorithms/hdn_algorithm_model.py ADDED
@@ -0,0 +1,103 @@
+"""HDN algorithm configuration."""
+
+from typing import Literal
+
+from bioimageio.spec.generic.v0_3 import CiteEntry
+from pydantic import ConfigDict
+
+from careamics.config.algorithms.vae_algorithm_model import VAEBasedAlgorithm
+from careamics.config.architectures import LVAEModel
+from careamics.config.loss_model import LVAELossConfig
+
+HDN = "Hierarchical DivNoising"
+
+HDN_DESCRIPTION = (
+    "HDN leverages a hierarchical VAE to perform image "
+    "restoration. It is designed to be interpretable and unsupervised, "
+    "making it suitable for a wide range of microscopy images."
+)
+HDN_REF = CiteEntry(
+    text='Prakash, M., Delbracio, M., Milanfar, P., Jug, F. 2022. "Interpretable '
+    'Unsupervised Diversity Denoising and Artefact Removal." The International '
+    "Conference on Learning Representations (ICLR).",
+    doi="10.1561/2200000056",
+)
+
+
+class HDNAlgorithm(VAEBasedAlgorithm):
+    """HDN algorithm configuration."""
+
+    model_config = ConfigDict(validate_assignment=True)
+
+    algorithm: Literal["hdn"] = "hdn"
+
+    loss: LVAELossConfig
+
+    model: LVAEModel  # TODO add validators
+
+    is_supervised: bool = False
+
+    def get_algorithm_friendly_name(self) -> str:
+        """
+        Get the algorithm friendly name.
+
+        Returns
+        -------
+        str
+            Friendly name of the algorithm.
+        """
+        return HDN
+
+    def get_algorithm_keywords(self) -> list[str]:
+        """
+        Get algorithm keywords.
+
+        Returns
+        -------
+        list[str]
+            List of keywords.
+        """
+        return [
+            "restoration",
+            "VAE",
+            "3D" if self.model.is_3D() else "2D",
+            "CAREamics",
+            "pytorch",
+        ]
+
+    def get_algorithm_references(self) -> str:
+        """
+        Get the algorithm references.
+
+        This is used to generate the README of the BioImage Model Zoo export.
+
+        Returns
+        -------
+        str
+            Algorithm references.
+        """
+        return HDN_REF.text + " doi: " + HDN_REF.doi
+
+    def get_algorithm_citations(self) -> list[CiteEntry]:
+        """
+        Return a list of citation entries of the current algorithm.
+
+        This is used to generate the model description for the BioImage Model Zoo.
+
+        Returns
+        -------
+        List[CiteEntry]
+            List of citation entries.
+        """
+        return [HDN_REF]
+
+    def get_algorithm_description(self) -> str:
+        """
+        Get the algorithm description.
+
+        Returns
+        -------
+        str
+            Algorithm description.
+        """
+        return HDN_DESCRIPTION
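A minimal instantiation sketch for the new config. The required fields follow the class body above; `loss_type="hdn"` is an assumption inferred from the `SupportedLoss.HDN` check in vae_algorithm_model.py, and the LVAEModel defaults (a single output channel, no multiscale) are assumed to satisfy the HDN validators:

    from careamics.config import HDNAlgorithm
    from careamics.config.architectures import LVAEModel
    from careamics.config.loss_model import LVAELossConfig

    algo = HDNAlgorithm(
        loss=LVAELossConfig(loss_type="hdn"),  # assumed literal behind SupportedLoss.HDN
        model=LVAEModel(architecture="LVAE"),  # defaults; output_channels must stay 1
    )
    print(algo.get_algorithm_friendly_name())  # "Hierarchical DivNoising"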
careamics/config/algorithms/microsplit_algorithm_model.py ADDED
@@ -0,0 +1,103 @@
+"""MicroSplit algorithm configuration."""
+
+from typing import Literal
+
+from bioimageio.spec.generic.v0_3 import CiteEntry
+from pydantic import ConfigDict
+
+from careamics.config.algorithms.vae_algorithm_model import VAEBasedAlgorithm
+from careamics.config.architectures import LVAEModel
+from careamics.config.loss_model import LVAELossConfig
+
+MICROSPLIT = "MicroSplit"
+
+MICROSPLIT_DESCRIPTION = """MicroSplit is a self-supervised deep learning method for
+microscopy image splitting that combines the strengths of both denoising and
+representation learning approaches."""
+
+MICROSPLIT_REF = CiteEntry(
+    text='Prakash, M., Delbracio, M., Milanfar, P., Jug, F. 2022. "Interpretable '
+    'Unsupervised Diversity Denoising and Artefact Removal." The International '
+    "Conference on Learning Representations (ICLR).",
+    doi="10.1561/2200000056",
+)
+
+
+class MicroSplitAlgorithm(VAEBasedAlgorithm):
+    """MicroSplit algorithm configuration."""
+
+    model_config = ConfigDict(validate_assignment=True)
+
+    algorithm: Literal["microsplit"] = "microsplit"
+
+    loss: LVAELossConfig
+
+    model: LVAEModel  # TODO add validators
+
+    is_supervised: bool = True
+
+    def get_algorithm_friendly_name(self) -> str:
+        """
+        Get the algorithm friendly name.
+
+        Returns
+        -------
+        str
+            Friendly name of the algorithm.
+        """
+        return MICROSPLIT
+
+    def get_algorithm_keywords(self) -> list[str]:
+        """
+        Get algorithm keywords.
+
+        Returns
+        -------
+        list[str]
+            List of keywords.
+        """
+        return [
+            "restoration",
+            "VAE",
+            "self-supervised",
+            "3D" if self.model.is_3D() else "2D",
+            "CAREamics",
+            "pytorch",
+        ]
+
+    def get_algorithm_references(self) -> str:
+        """
+        Get the algorithm references.
+
+        This is used to generate the README of the BioImage Model Zoo export.
+
+        Returns
+        -------
+        str
+            Algorithm references.
+        """
+        return MICROSPLIT_REF.text + " doi: " + MICROSPLIT_REF.doi
+
+    def get_algorithm_citations(self) -> list[CiteEntry]:
+        """
+        Return a list of citation entries of the current algorithm.
+
+        This is used to generate the model description for the BioImage Model Zoo.
+
+        Returns
+        -------
+        List[CiteEntry]
+            List of citation entries.
+        """
+        return [MICROSPLIT_REF]
+
+    def get_algorithm_description(self) -> str:
+        """
+        Get the algorithm description.
+
+        Returns
+        -------
+        str
+            Algorithm description.
+        """
+        return MICROSPLIT_DESCRIPTION
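MicroSplitAlgorithm mirrors HDNAlgorithm except for the algorithm literal and the is_supervised default. A parallel sketch under the same assumptions, with `loss_type="musplit"` standing in for one of the loss literals the microsplit validator accepts:

    from careamics.config import MicroSplitAlgorithm
    from careamics.config.architectures import LVAEModel
    from careamics.config.loss_model import LVAELossConfig

    algo = MicroSplitAlgorithm(
        loss=LVAELossConfig(loss_type="musplit"),  # assumed accepted literal
        model=LVAEModel(architecture="LVAE", output_channels=2),  # field referenced by the validators
    )
    print(algo.is_supervised)  # True, unlike HDN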
careamics/config/algorithms/n2v_algorithm_model.py CHANGED
@@ -1,10 +1,9 @@
 """N2V Algorithm configuration."""
 
-from typing import Annotated, Literal
+from typing import Annotated, Literal, Self
 
 from bioimageio.spec.generic.v0_3 import CiteEntry
 from pydantic import AfterValidator, ConfigDict, model_validator
-from typing_extensions import Self
 
 from careamics.config.architectures import UNetModel
 from careamics.config.support import SupportedPixelManipulation, SupportedStructAxis
careamics/config/algorithms/vae_algorithm_model.py CHANGED
@@ -3,10 +3,9 @@
 from __future__ import annotations
 
 from pprint import pformat
-from typing import Literal
+from typing import Literal, Self
 
 from pydantic import BaseModel, ConfigDict, model_validator
-from typing_extensions import Self
 
 from careamics.config.architectures import LVAEModel
 from careamics.config.likelihood_model import (
@@ -40,14 +39,17 @@ class VAEBasedAlgorithm(BaseModel):
     # defined in SupportedAlgorithm
     # TODO: Use supported Enum classes for typing?
     # - values can still be passed as strings and they will be cast to Enum
-    algorithm: Literal["musplit", "denoisplit"]
+    algorithm: Literal["hdn", "microsplit"]
 
     # NOTE: these are all configs (pydantic models)
     loss: LVAELossConfig
     model: LVAEModel
     noise_model: MultiChannelNMConfig | None = None
     noise_model_likelihood: NMLikelihoodConfig | None = None
-    gaussian_likelihood: GaussianLikelihoodConfig | None = None
+    gaussian_likelihood: GaussianLikelihoodConfig | None = None  # TODO change to str
+
+    mmse_count: int = 1
+    is_supervised: bool = False
 
     # Optional fields
     optimizer: OptimizerModel = OptimizerModel()
@@ -64,22 +66,26 @@ class VAEBasedAlgorithm(BaseModel):
         Self
             The validated model.
         """
-        # musplit
-        if self.algorithm == SupportedAlgorithm.MUSPLIT:
-            if self.loss.loss_type != SupportedLoss.MUSPLIT:
+        # hdn
+        # TODO move to designated configurations
+        if self.algorithm == SupportedAlgorithm.HDN:
+            if self.loss.loss_type != SupportedLoss.HDN:
                 raise ValueError(
-                    f"Algorithm {self.algorithm} only supports loss `musplit`."
+                    f"Algorithm {self.algorithm} only supports loss `hdn`."
                 )
-
-        if self.algorithm == SupportedAlgorithm.DENOISPLIT:
+            if self.model.multiscale_count > 1:
+                raise ValueError("Algorithm `hdn` does not support multiscale models.")
+        # musplit
+        if self.algorithm == SupportedAlgorithm.MICROSPLIT:
             if self.loss.loss_type not in [
+                SupportedLoss.MUSPLIT,
                 SupportedLoss.DENOISPLIT,
                 SupportedLoss.DENOISPLIT_MUSPLIT,
-            ]:
+            ]:  # TODO Update losses configs, make loss just microsplit
                 raise ValueError(
-                    f"Algorithm {self.algorithm} only supports loss `denoisplit` "
-                    "or `denoisplit_musplit."
-                )
+                    f"Algorithm {self.algorithm} only supports loss `microsplit`."
+                )  # TODO Update losses configs
+
         if (
             self.loss.loss_type == SupportedLoss.DENOISPLIT
             and self.model.predict_logvar is not None
@@ -88,8 +94,10 @@ class VAEBasedAlgorithm(BaseModel):
                 "Algorithm `denoisplit` with loss `denoisplit` only supports "
                 "`predict_logvar` as `None`."
             )
-
-        if self.noise_model is None:
+        if (
+            self.loss.loss_type == SupportedLoss.DENOISPLIT
+            and self.noise_model is None
+        ):
             raise ValueError("Algorithm `denoisplit` requires a noise model.")
         # TODO: what if algorithm is not musplit or denoisplit
         return self
@@ -108,6 +116,12 @@ class VAEBasedAlgorithm(BaseModel):
                 f"Number of output channels ({self.model.output_channels}) must match "
                 f"the number of noise models ({len(self.noise_model.noise_models)})."
             )
+
+        if self.algorithm == SupportedAlgorithm.HDN:
+            assert self.model.output_channels == 1, (
+                f"Number of output channels ({self.model.output_channels}) must be 1 "
+                "for algorithm `hdn`."
+            )
         return self
 
     @model_validator(mode="after")
@@ -127,6 +141,16 @@ class VAEBasedAlgorithm(BaseModel):
                 "Gaussian likelihood model `predict_logvar` "
                 f"({self.gaussian_likelihood.predict_logvar}).",
             )
+        # if self.algorithm == SupportedAlgorithm.HDN:
+        #     assert (
+        #         self.model.predict_logvar is None
+        #     ), "Model `predict_logvar` must be `None` for algorithm `hdn`."
+        #     if self.gaussian_likelihood is not None:
+        #         assert self.gaussian_likelihood.predict_logvar is None, (
+        #             "Gaussian likelihood model `predict_logvar` must be `None` "
+        #             "for algorithm `hdn`."
+        #         )
+        # TODO check this
         return self
 
     def __str__(self) -> str:
@@ -138,3 +162,14 @@ class VAEBasedAlgorithm(BaseModel):
             Pretty string.
         """
         return pformat(self.model_dump())
+
+    @classmethod
+    def get_compatible_algorithms(cls) -> list[str]:
+        """Get the list of compatible algorithms.
+
+        Returns
+        -------
+        list of str
+            List of compatible algorithms.
+        """
+        return ["hdn", "microsplit"]
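The musplit/denoisplit to hdn/microsplit rename is queryable at runtime through the new classmethod, which needs no instantiation:

    from careamics.config import VAEBasedAlgorithm

    print(VAEBasedAlgorithm.get_compatible_algorithms())  # ['hdn', 'microsplit']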
careamics/config/architectures/lvae_model.py CHANGED
@@ -1,9 +1,8 @@
 """LVAE Pydantic model."""
 
-from typing import Literal
+from typing import Literal, Self
 
 from pydantic import ConfigDict, Field, field_validator, model_validator
-from typing_extensions import Self
 
 from .architecture_model import ArchitectureModel
 
@@ -15,11 +14,9 @@ class LVAEModel(ArchitectureModel):
     model_config = ConfigDict(validate_assignment=True, validate_default=True)
 
     architecture: Literal["LVAE"]
-    """Name of the architecture."""
-
-    input_shape: list[int] = Field(default=[64, 64], validate_default=True)
-    """Shape of the input patch (C, Z, Y, X) or (C, Y, X) if the data is 2D."""
 
+    input_shape: tuple[int, ...] = Field(default=(64, 64), validate_default=True)
+    """Shape of the input patch (Z, Y, X) or (Y, X) if the data is 2D."""
     encoder_conv_strides: list = Field(default=[2, 2], validate_default=True)
 
     # TODO make this per hierarchy step ?
@@ -42,7 +39,7 @@ class LVAEModel(ArchitectureModel):
         default="ELU",
     )
 
-    predict_logvar: Literal[None, "pixelwise"] = None
+    predict_logvar: Literal[None, "pixelwise"] = "pixelwise"
     analytical_kl: bool = Field(default=False)
 
     @model_validator(mode="after")
@@ -126,6 +123,13 @@ class LVAEModel(ArchitectureModel):
                 f"Input shape must be greater than 1 in all dimensions"
                 f"(got {input_shape})."
             )
+
+        if any(s < 64 for s in input_shape[-2:]):
+            raise ValueError(
+                f"Input shape must be greater or equal to 64 in XY dimensions"
+                f"(got {input_shape})."
+            )
+
         return input_shape
 
     @field_validator("encoder_n_filters")
@@ -255,4 +259,4 @@ class LVAEModel(ArchitectureModel):
         bool
             Whether the model is 3D or not.
         """
-        return self.conv_dims == 3
+        return len(self.input_shape) == 3
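input_shape is now a channel-free tuple, predict_logvar defaults to "pixelwise", and XY sizes below 64 are rejected. A small sketch of the new validation behavior, assuming the remaining LVAEModel defaults validate on their own:

    from pydantic import ValidationError

    from careamics.config.architectures import LVAEModel

    model = LVAEModel(architecture="LVAE", input_shape=(64, 64))
    print(model.is_3D())  # False: two spatial dims, (Y, X)

    try:
        LVAEModel(architecture="LVAE", input_shape=(32, 32))
    except (ValidationError, ValueError) as err:
        print("rejected:", err)  # XY must be >= 64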
careamics/config/callback_model.py CHANGED
@@ -24,7 +24,7 @@ class CheckpointModel(BaseModel):
 
     model_config = ConfigDict(validate_assignment=True, validate_default=True)
 
-    monitor: Literal["val_loss"] = Field(default="val_loss")
+    monitor: Literal["val_loss"] | str | None = Field(default="val_loss")
     """Quantity to monitor, currently only `val_loss`."""
 
     verbose: bool = Field(default=False)
@@ -36,8 +36,12 @@ class CheckpointModel(BaseModel):
     save_last: Literal[True, False, "link"] | None = Field(default=True)
     """When `True`, saves a last.ckpt copy whenever a checkpoint file gets saved."""
 
-    save_top_k: int = Field(default=3, ge=-1, le=100)
-    """If `save_top_k == kz, the best k models according to the quantity monitored
+    save_top_k: int = Field(
+        default=3,
+        ge=-1,
+        le=100,
+    )
+    """If `save_top_k == k, the best k models according to the quantity monitored
     will be saved. If `save_top_k == 0`, no models are saved. if `save_top_k == -1`,
     all models are saved."""
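monitor is no longer pinned to val_loss: the `Literal["val_loss"] | str | None` union effectively accepts any metric name. A small sketch, assuming CheckpointModel validates standalone:

    from careamics.config.callback_model import CheckpointModel

    ckpt = CheckpointModel()  # monitor defaults to "val_loss"
    ckpt_psnr = CheckpointModel(monitor="val_psnr", save_top_k=1)  # any metric name now allowed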