careamics 0.0.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (279)
  1. careamics/__init__.py +24 -0
  2. careamics/careamist.py +961 -0
  3. careamics/cli/__init__.py +5 -0
  4. careamics/cli/conf.py +394 -0
  5. careamics/cli/main.py +234 -0
  6. careamics/cli/utils.py +27 -0
  7. careamics/config/__init__.py +66 -0
  8. careamics/config/algorithms/__init__.py +21 -0
  9. careamics/config/algorithms/care_algorithm_config.py +122 -0
  10. careamics/config/algorithms/hdn_algorithm_config.py +103 -0
  11. careamics/config/algorithms/microsplit_algorithm_config.py +103 -0
  12. careamics/config/algorithms/n2n_algorithm_config.py +115 -0
  13. careamics/config/algorithms/n2v_algorithm_config.py +296 -0
  14. careamics/config/algorithms/pn2v_algorithm_config.py +301 -0
  15. careamics/config/algorithms/unet_algorithm_config.py +91 -0
  16. careamics/config/algorithms/vae_algorithm_config.py +178 -0
  17. careamics/config/architectures/__init__.py +7 -0
  18. careamics/config/architectures/architecture_config.py +37 -0
  19. careamics/config/architectures/lvae_config.py +262 -0
  20. careamics/config/architectures/unet_config.py +125 -0
  21. careamics/config/configuration.py +367 -0
  22. careamics/config/configuration_factories.py +2400 -0
  23. careamics/config/data/__init__.py +27 -0
  24. careamics/config/data/data_config.py +472 -0
  25. careamics/config/data/inference_config.py +237 -0
  26. careamics/config/data/ng_data_config.py +1038 -0
  27. careamics/config/data/patch_filter/__init__.py +15 -0
  28. careamics/config/data/patch_filter/filter_config.py +16 -0
  29. careamics/config/data/patch_filter/mask_filter_config.py +17 -0
  30. careamics/config/data/patch_filter/max_filter_config.py +15 -0
  31. careamics/config/data/patch_filter/meanstd_filter_config.py +18 -0
  32. careamics/config/data/patch_filter/shannon_filter_config.py +15 -0
  33. careamics/config/data/patching_strategies/__init__.py +15 -0
  34. careamics/config/data/patching_strategies/_overlapping_patched_config.py +102 -0
  35. careamics/config/data/patching_strategies/_patched_config.py +56 -0
  36. careamics/config/data/patching_strategies/random_patching_config.py +45 -0
  37. careamics/config/data/patching_strategies/sequential_patching_config.py +25 -0
  38. careamics/config/data/patching_strategies/tiled_patching_config.py +40 -0
  39. careamics/config/data/patching_strategies/whole_patching_config.py +12 -0
  40. careamics/config/data/tile_information.py +65 -0
  41. careamics/config/lightning/__init__.py +15 -0
  42. careamics/config/lightning/callbacks/__init__.py +8 -0
  43. careamics/config/lightning/callbacks/callback_config.py +116 -0
  44. careamics/config/lightning/optimizer_configs.py +186 -0
  45. careamics/config/lightning/training_config.py +70 -0
  46. careamics/config/losses/__init__.py +8 -0
  47. careamics/config/losses/loss_config.py +60 -0
  48. careamics/config/ng_configs/__init__.py +5 -0
  49. careamics/config/ng_configs/n2v_configuration.py +64 -0
  50. careamics/config/ng_configs/ng_configuration.py +256 -0
  51. careamics/config/ng_factories/__init__.py +9 -0
  52. careamics/config/ng_factories/algorithm_factory.py +120 -0
  53. careamics/config/ng_factories/data_factory.py +154 -0
  54. careamics/config/ng_factories/n2v_factory.py +256 -0
  55. careamics/config/ng_factories/training_factory.py +69 -0
  56. careamics/config/noise_model/__init__.py +12 -0
  57. careamics/config/noise_model/likelihood_config.py +60 -0
  58. careamics/config/noise_model/noise_model_config.py +149 -0
  59. careamics/config/support/__init__.py +31 -0
  60. careamics/config/support/supported_activations.py +27 -0
  61. careamics/config/support/supported_algorithms.py +40 -0
  62. careamics/config/support/supported_architectures.py +13 -0
  63. careamics/config/support/supported_data.py +122 -0
  64. careamics/config/support/supported_filters.py +17 -0
  65. careamics/config/support/supported_loggers.py +10 -0
  66. careamics/config/support/supported_losses.py +32 -0
  67. careamics/config/support/supported_optimizers.py +57 -0
  68. careamics/config/support/supported_patching_strategies.py +22 -0
  69. careamics/config/support/supported_pixel_manipulations.py +15 -0
  70. careamics/config/support/supported_struct_axis.py +21 -0
  71. careamics/config/support/supported_transforms.py +12 -0
  72. careamics/config/transformations/__init__.py +22 -0
  73. careamics/config/transformations/n2v_manipulate_config.py +79 -0
  74. careamics/config/transformations/normalize_config.py +59 -0
  75. careamics/config/transformations/transform_config.py +45 -0
  76. careamics/config/transformations/transform_unions.py +29 -0
  77. careamics/config/transformations/xy_flip_config.py +43 -0
  78. careamics/config/transformations/xy_random_rotate90_config.py +35 -0
  79. careamics/config/utils/__init__.py +8 -0
  80. careamics/config/utils/configuration_io.py +85 -0
  81. careamics/config/validators/__init__.py +18 -0
  82. careamics/config/validators/axes_validators.py +90 -0
  83. careamics/config/validators/model_validators.py +84 -0
  84. careamics/config/validators/patch_validators.py +55 -0
  85. careamics/conftest.py +39 -0
  86. careamics/dataset/__init__.py +17 -0
  87. careamics/dataset/dataset_utils/__init__.py +19 -0
  88. careamics/dataset/dataset_utils/dataset_utils.py +118 -0
  89. careamics/dataset/dataset_utils/file_utils.py +141 -0
  90. careamics/dataset/dataset_utils/iterate_over_files.py +84 -0
  91. careamics/dataset/dataset_utils/running_stats.py +189 -0
  92. careamics/dataset/in_memory_dataset.py +303 -0
  93. careamics/dataset/in_memory_pred_dataset.py +88 -0
  94. careamics/dataset/in_memory_tiled_pred_dataset.py +131 -0
  95. careamics/dataset/iterable_dataset.py +294 -0
  96. careamics/dataset/iterable_pred_dataset.py +121 -0
  97. careamics/dataset/iterable_tiled_pred_dataset.py +141 -0
  98. careamics/dataset/patching/__init__.py +1 -0
  99. careamics/dataset/patching/patching.py +300 -0
  100. careamics/dataset/patching/random_patching.py +110 -0
  101. careamics/dataset/patching/sequential_patching.py +212 -0
  102. careamics/dataset/patching/validate_patch_dimension.py +64 -0
  103. careamics/dataset/tiling/__init__.py +10 -0
  104. careamics/dataset/tiling/collate_tiles.py +33 -0
  105. careamics/dataset/tiling/lvae_tiled_patching.py +375 -0
  106. careamics/dataset/tiling/tiled_patching.py +166 -0
  107. careamics/dataset_ng/README.md +212 -0
  108. careamics/dataset_ng/__init__.py +0 -0
  109. careamics/dataset_ng/dataset.py +365 -0
  110. careamics/dataset_ng/demos/bsd68_demo.ipynb +361 -0
  111. careamics/dataset_ng/demos/bsd68_zarr_demo.ipynb +453 -0
  112. careamics/dataset_ng/demos/care_U2OS_demo.ipynb +330 -0
  113. careamics/dataset_ng/demos/demo_custom_image_stack.ipynb +736 -0
  114. careamics/dataset_ng/demos/demo_datamodule.ipynb +447 -0
  115. careamics/dataset_ng/demos/demo_dataset.ipynb +278 -0
  116. careamics/dataset_ng/demos/demo_patch_extractor.py +51 -0
  117. careamics/dataset_ng/demos/mouse_nuclei_demo.ipynb +293 -0
  118. careamics/dataset_ng/factory.py +180 -0
  119. careamics/dataset_ng/grouped_index_sampler.py +73 -0
  120. careamics/dataset_ng/image_stack/__init__.py +14 -0
  121. careamics/dataset_ng/image_stack/czi_image_stack.py +396 -0
  122. careamics/dataset_ng/image_stack/file_image_stack.py +140 -0
  123. careamics/dataset_ng/image_stack/image_stack_protocol.py +93 -0
  124. careamics/dataset_ng/image_stack/image_utils/__init__.py +6 -0
  125. careamics/dataset_ng/image_stack/image_utils/image_stack_utils.py +125 -0
  126. careamics/dataset_ng/image_stack/in_memory_image_stack.py +93 -0
  127. careamics/dataset_ng/image_stack/zarr_image_stack.py +170 -0
  128. careamics/dataset_ng/image_stack_loader/__init__.py +19 -0
  129. careamics/dataset_ng/image_stack_loader/image_stack_loader_protocol.py +70 -0
  130. careamics/dataset_ng/image_stack_loader/image_stack_loaders.py +273 -0
  131. careamics/dataset_ng/image_stack_loader/zarr_utils.py +130 -0
  132. careamics/dataset_ng/legacy_interoperability.py +175 -0
  133. careamics/dataset_ng/microsplit_input_synth.py +377 -0
  134. careamics/dataset_ng/patch_extractor/__init__.py +7 -0
  135. careamics/dataset_ng/patch_extractor/limit_file_extractor.py +50 -0
  136. careamics/dataset_ng/patch_extractor/patch_construction.py +151 -0
  137. careamics/dataset_ng/patch_extractor/patch_extractor.py +117 -0
  138. careamics/dataset_ng/patch_filter/__init__.py +20 -0
  139. careamics/dataset_ng/patch_filter/coordinate_filter_protocol.py +27 -0
  140. careamics/dataset_ng/patch_filter/filter_factory.py +95 -0
  141. careamics/dataset_ng/patch_filter/mask_filter.py +96 -0
  142. careamics/dataset_ng/patch_filter/max_filter.py +188 -0
  143. careamics/dataset_ng/patch_filter/mean_std_filter.py +218 -0
  144. careamics/dataset_ng/patch_filter/patch_filter_protocol.py +50 -0
  145. careamics/dataset_ng/patch_filter/shannon_filter.py +188 -0
  146. careamics/dataset_ng/patching_strategies/__init__.py +26 -0
  147. careamics/dataset_ng/patching_strategies/patching_strategy_factory.py +50 -0
  148. careamics/dataset_ng/patching_strategies/patching_strategy_protocol.py +161 -0
  149. careamics/dataset_ng/patching_strategies/random_patching.py +393 -0
  150. careamics/dataset_ng/patching_strategies/sequential_patching.py +99 -0
  151. careamics/dataset_ng/patching_strategies/tiling_strategy.py +207 -0
  152. careamics/dataset_ng/patching_strategies/whole_sample.py +61 -0
  153. careamics/file_io/__init__.py +15 -0
  154. careamics/file_io/read/__init__.py +11 -0
  155. careamics/file_io/read/get_func.py +57 -0
  156. careamics/file_io/read/tiff.py +58 -0
  157. careamics/file_io/write/__init__.py +15 -0
  158. careamics/file_io/write/get_func.py +63 -0
  159. careamics/file_io/write/tiff.py +40 -0
  160. careamics/lightning/__init__.py +32 -0
  161. careamics/lightning/callbacks/__init__.py +13 -0
  162. careamics/lightning/callbacks/data_stats_callback.py +33 -0
  163. careamics/lightning/callbacks/hyperparameters_callback.py +49 -0
  164. careamics/lightning/callbacks/prediction_writer_callback/__init__.py +20 -0
  165. careamics/lightning/callbacks/prediction_writer_callback/file_path_utils.py +56 -0
  166. careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +234 -0
  167. careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +399 -0
  168. careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +215 -0
  169. careamics/lightning/callbacks/progress_bar_callback.py +90 -0
  170. careamics/lightning/dataset_ng/__init__.py +1 -0
  171. careamics/lightning/dataset_ng/callbacks/__init__.py +1 -0
  172. careamics/lightning/dataset_ng/callbacks/prediction_writer/__init__.py +29 -0
  173. careamics/lightning/dataset_ng/callbacks/prediction_writer/cached_tiles_strategy.py +164 -0
  174. careamics/lightning/dataset_ng/callbacks/prediction_writer/file_path_utils.py +33 -0
  175. careamics/lightning/dataset_ng/callbacks/prediction_writer/prediction_writer_callback.py +219 -0
  176. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_image_strategy.py +91 -0
  177. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_strategy.py +27 -0
  178. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_strategy_factory.py +214 -0
  179. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_tiles_zarr_strategy.py +375 -0
  180. careamics/lightning/dataset_ng/data_module.py +529 -0
  181. careamics/lightning/dataset_ng/data_module_utils.py +395 -0
  182. careamics/lightning/dataset_ng/lightning_modules/__init__.py +9 -0
  183. careamics/lightning/dataset_ng/lightning_modules/care_module.py +97 -0
  184. careamics/lightning/dataset_ng/lightning_modules/n2v_module.py +106 -0
  185. careamics/lightning/dataset_ng/lightning_modules/unet_module.py +221 -0
  186. careamics/lightning/dataset_ng/prediction/__init__.py +16 -0
  187. careamics/lightning/dataset_ng/prediction/convert_prediction.py +198 -0
  188. careamics/lightning/dataset_ng/prediction/stitch_prediction.py +171 -0
  189. careamics/lightning/lightning_module.py +914 -0
  190. careamics/lightning/microsplit_data_module.py +632 -0
  191. careamics/lightning/predict_data_module.py +341 -0
  192. careamics/lightning/train_data_module.py +666 -0
  193. careamics/losses/__init__.py +21 -0
  194. careamics/losses/fcn/__init__.py +1 -0
  195. careamics/losses/fcn/losses.py +125 -0
  196. careamics/losses/loss_factory.py +80 -0
  197. careamics/losses/lvae/__init__.py +1 -0
  198. careamics/losses/lvae/loss_utils.py +83 -0
  199. careamics/losses/lvae/losses.py +589 -0
  200. careamics/lvae_training/__init__.py +0 -0
  201. careamics/lvae_training/calibration.py +191 -0
  202. careamics/lvae_training/dataset/__init__.py +20 -0
  203. careamics/lvae_training/dataset/config.py +135 -0
  204. careamics/lvae_training/dataset/lc_dataset.py +274 -0
  205. careamics/lvae_training/dataset/ms_dataset_ref.py +1067 -0
  206. careamics/lvae_training/dataset/multich_dataset.py +1121 -0
  207. careamics/lvae_training/dataset/multicrop_dset.py +196 -0
  208. careamics/lvae_training/dataset/multifile_dataset.py +335 -0
  209. careamics/lvae_training/dataset/types.py +32 -0
  210. careamics/lvae_training/dataset/utils/__init__.py +0 -0
  211. careamics/lvae_training/dataset/utils/data_utils.py +114 -0
  212. careamics/lvae_training/dataset/utils/empty_patch_fetcher.py +65 -0
  213. careamics/lvae_training/dataset/utils/index_manager.py +491 -0
  214. careamics/lvae_training/dataset/utils/index_switcher.py +165 -0
  215. careamics/lvae_training/eval_utils.py +987 -0
  216. careamics/lvae_training/get_config.py +84 -0
  217. careamics/lvae_training/lightning_module.py +701 -0
  218. careamics/lvae_training/metrics.py +214 -0
  219. careamics/lvae_training/train_lvae.py +342 -0
  220. careamics/lvae_training/train_utils.py +121 -0
  221. careamics/model_io/__init__.py +7 -0
  222. careamics/model_io/bioimage/__init__.py +11 -0
  223. careamics/model_io/bioimage/_readme_factory.py +113 -0
  224. careamics/model_io/bioimage/bioimage_utils.py +56 -0
  225. careamics/model_io/bioimage/cover_factory.py +171 -0
  226. careamics/model_io/bioimage/model_description.py +341 -0
  227. careamics/model_io/bmz_io.py +251 -0
  228. careamics/model_io/model_io_utils.py +95 -0
  229. careamics/models/__init__.py +5 -0
  230. careamics/models/activation.py +40 -0
  231. careamics/models/layers.py +495 -0
  232. careamics/models/lvae/__init__.py +3 -0
  233. careamics/models/lvae/layers.py +1371 -0
  234. careamics/models/lvae/likelihoods.py +394 -0
  235. careamics/models/lvae/lvae.py +848 -0
  236. careamics/models/lvae/noise_models.py +738 -0
  237. careamics/models/lvae/stochastic.py +394 -0
  238. careamics/models/lvae/utils.py +404 -0
  239. careamics/models/model_factory.py +54 -0
  240. careamics/models/unet.py +449 -0
  241. careamics/nm_training_placeholder.py +203 -0
  242. careamics/prediction_utils/__init__.py +21 -0
  243. careamics/prediction_utils/lvae_prediction.py +158 -0
  244. careamics/prediction_utils/lvae_tiling_manager.py +362 -0
  245. careamics/prediction_utils/prediction_outputs.py +238 -0
  246. careamics/prediction_utils/stitch_prediction.py +193 -0
  247. careamics/py.typed +5 -0
  248. careamics/transforms/__init__.py +22 -0
  249. careamics/transforms/compose.py +173 -0
  250. careamics/transforms/n2v_manipulate.py +150 -0
  251. careamics/transforms/n2v_manipulate_torch.py +149 -0
  252. careamics/transforms/normalize.py +374 -0
  253. careamics/transforms/pixel_manipulation.py +406 -0
  254. careamics/transforms/pixel_manipulation_torch.py +388 -0
  255. careamics/transforms/struct_mask_parameters.py +20 -0
  256. careamics/transforms/transform.py +24 -0
  257. careamics/transforms/tta.py +88 -0
  258. careamics/transforms/xy_flip.py +131 -0
  259. careamics/transforms/xy_random_rotate90.py +108 -0
  260. careamics/utils/__init__.py +19 -0
  261. careamics/utils/autocorrelation.py +40 -0
  262. careamics/utils/base_enum.py +60 -0
  263. careamics/utils/context.py +67 -0
  264. careamics/utils/deprecation.py +63 -0
  265. careamics/utils/lightning_utils.py +71 -0
  266. careamics/utils/logging.py +323 -0
  267. careamics/utils/metrics.py +394 -0
  268. careamics/utils/path_utils.py +26 -0
  269. careamics/utils/plotting.py +76 -0
  270. careamics/utils/ram.py +15 -0
  271. careamics/utils/receptive_field.py +108 -0
  272. careamics/utils/serializers.py +62 -0
  273. careamics/utils/torch_utils.py +150 -0
  274. careamics/utils/version.py +38 -0
  275. careamics-0.0.19.dist-info/METADATA +80 -0
  276. careamics-0.0.19.dist-info/RECORD +279 -0
  277. careamics-0.0.19.dist-info/WHEEL +4 -0
  278. careamics-0.0.19.dist-info/entry_points.txt +2 -0
  279. careamics-0.0.19.dist-info/licenses/LICENSE +28 -0
careamics/config/data/ng_data_config.py
@@ -0,0 +1,1038 @@
+"""Data configuration."""
+
+from __future__ import annotations
+
+import os
+import random
+import sys
+from collections.abc import Sequence
+from pprint import pformat
+from typing import Annotated, Any, Literal, Self, Union
+from warnings import warn
+
+import numpy as np
+from numpy.typing import NDArray
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    PlainSerializer,
+    ValidationInfo,
+    field_validator,
+    model_validator,
+)
+
+from careamics.utils import BaseEnum
+
+from ..transformations import XYFlipConfig, XYRandomRotate90Config
+from ..validators import check_axes_validity, check_czi_axes_validity
+from .patch_filter import (
+    MaskFilterConfig,
+    MaxFilterConfig,
+    MeanSTDFilterConfig,
+    ShannonFilterConfig,
+)
+from .patching_strategies import (
+    FixedRandomPatchingConfig,
+    RandomPatchingConfig,
+    TiledPatchingConfig,
+    WholePatchingConfig,
+)
+
+# TODO: Validate the specific sizes of tiles and overlaps given UNet constraints
+# - needs to be done in the Configuration
+# - patches and overlaps sizes must also be checked against dimensionality
+# - Should we have a UNet and a LVAE NGDataConfig subclass with specific validations?
+
+# TODO: is 3D updated anywhere in the code in CAREamist/downstream?
+# - this will be important when swapping the data config in Configuration
+# - `set_3D` currently not implemented here
+
+# TODO: this module is very long, can we split the validation somewhere else and
+# leverage Pydantic to add validation directly to the declaration of each field?
+
+
+def generate_random_seed() -> int:
+    """Generate a random seed for reproducibility.
+
+    Returns
+    -------
+    int
+        A random integer between 1 and 2^31 - 1.
+    """
+    return random.randint(1, 2**31 - 1)
+
+
+def np_float_to_scientific_str(x: float) -> str:
+    """Return a string scientific representation of a float.
+
+    In particular, this method is used to serialize floats to strings, allowing
+    numpy.float32 to be passed in the Pydantic model and written to a yaml file as str.
+
+    Parameters
+    ----------
+    x : float
+        Input value.
+
+    Returns
+    -------
+    str
+        Scientific string representation of the input value.
+    """
+    return np.format_float_scientific(x, precision=7)
+
+
+Float = Annotated[float, PlainSerializer(np_float_to_scientific_str, return_type=str)]
+"""Annotated float type, used to serialize floats to strings."""
+
+PatchingConfig = Union[
+    FixedRandomPatchingConfig,
+    RandomPatchingConfig,
+    TiledPatchingConfig,
+    WholePatchingConfig,
+]
+"""Patching strategy type."""
+
+PatchFilterConfig = Union[
+    MaxFilterConfig,
+    MeanSTDFilterConfig,
+    ShannonFilterConfig,
+]
+"""Patch filter type."""
+
+CoordFilterConfig = Union[MaskFilterConfig]  # add more here as needed
+"""Coordinate filter type."""
+
+
+class Mode(str, BaseEnum):
+    """Dataset mode."""
+
+    TRAINING = "training"
+    VALIDATING = "validating"
+    PREDICTING = "predicting"
+
+
+def default_in_memory(validated_params: dict[str, Any]) -> bool:
+    """Default factory for the `in_memory` field.
+
+    Based on the value of `data_type`, set the default for `in_memory` to `True` if
+    the data type is 'array', 'tiff', or 'custom', and to `False` otherwise ('zarr'
+    or 'czi').
+
+    Parameters
+    ----------
+    validated_params : dict of {str: Any}
+        Validated parameters.
+
+    Returns
+    -------
+    bool
+        Default value for the `in_memory` field.
+    """
+    return validated_params.get("data_type") not in ("zarr", "czi")
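
A quick sketch of the resulting defaults (the factory receives the already-validated fields, so the default depends on `data_type`):

    from careamics.config.data.ng_data_config import default_in_memory

    assert default_in_memory({"data_type": "tiff"}) is True   # in memory by default
    assert default_in_memory({"data_type": "zarr"}) is False  # lazy loading by default
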
+
+
+class NGDataConfig(BaseModel):
+    """Next-Generation Dataset configuration.
+
+    NGDataConfig is used for both training and prediction, with the patching strategy
+    determining how the data is processed. Note that `random` is the only patching
+    strategy compatible with training, `fixed_random` is used for validation, and
+    `tiled` and `whole` are only used for prediction.
+
+    If std is specified, mean must be specified as well. Note that setting the std
+    first and then the mean (if they were both `None` before) will raise a validation
+    error. Prefer `set_means_and_stds` instead, which sets both at once. Means and
+    stds are expected to be lists of floats, one for each channel. For supervised
+    tasks, the mean and std of the target can differ from those of the input data.
+
+    All supported transforms are defined in the SupportedTransform enum.
+    """
+
+    # Pydantic class configuration
+    model_config = ConfigDict(
+        validate_assignment=True,
+    )
+
+    # Dataset configuration
+    mode: Mode
+    """Dataset mode, either training, validating or predicting."""
+
+    data_type: Literal["array", "tiff", "zarr", "czi", "custom"]
+    """Type of input data."""
+
+    axes: str
+    """Axes of the data, as defined in SupportedAxes."""
+
+    patching: PatchingConfig = Field(..., discriminator="name")
+    """Patching strategy to use. Note that `random` is the only supported strategy for
+    training, `fixed_random` for validation, while `tiled` and `whole` are only used
+    for prediction."""
+
+    # Optional fields
+    batch_size: int = Field(default=1, ge=1, validate_default=True)
+    """Batch size for training."""
+
+    in_memory: bool = Field(default_factory=default_in_memory, validate_default=True)
+    """Whether to load all data into memory. This is only supported for 'array',
+    'tiff' and 'custom' data types. Must be `True` for 'array'. If not specified,
+    defaults to `True` for 'array', 'tiff' and 'custom', and `False` for 'zarr' and
+    'czi' data types."""
+
+    channels: Sequence[int] | None = Field(default=None)
+    """Channels to use from the data. If `None`, all channels are used."""
+
+    patch_filter: PatchFilterConfig | None = Field(default=None, discriminator="name")
+    """Patch filter to apply when using random patching. Only available if
+    mode is `training`."""
+
+    coord_filter: CoordFilterConfig | None = Field(default=None, discriminator="name")
+    """Coordinate filter to apply when using random patching. Only available if
+    mode is `training`."""
+
+    patch_filter_patience: int = Field(default=5, ge=1)
+    """Number of consecutive patches not passing the filter before accepting the next
+    patch."""
+
+    image_means: list[Float] | None = Field(default=None, min_length=0, max_length=32)
+    """Means of the data across channels, used for normalization."""
+
+    image_stds: list[Float] | None = Field(default=None, min_length=0, max_length=32)
+    """Standard deviations of the data across channels, used for normalization."""
+
+    target_means: list[Float] | None = Field(default=None, min_length=0, max_length=32)
+    """Means of the target data across channels, used for normalization."""
+
+    target_stds: list[Float] | None = Field(default=None, min_length=0, max_length=32)
+    """Standard deviations of the target data across channels, used for
+    normalization."""
+
+    transforms: Sequence[Union[XYFlipConfig, XYRandomRotate90Config]] = Field(
+        default=(
+            XYFlipConfig(),
+            XYRandomRotate90Config(),
+        ),
+        validate_default=True,
+    )
+    """List of transformations to apply to the data, available transforms are defined
+    in SupportedTransform."""
+
+    train_dataloader_params: dict[str, Any] = Field(
+        default={"shuffle": True}, validate_default=True
+    )
+    """Dictionary of PyTorch training dataloader parameters. The dataloader parameters
+    should include the `shuffle` key, which is set to `True` by default. We strongly
+    recommend keeping it set to `True` to ensure the best training results."""
+
+    val_dataloader_params: dict[str, Any] = Field(default={})
+    """Dictionary of PyTorch validation dataloader parameters."""
+
+    pred_dataloader_params: dict[str, Any] = Field(default={})
+    """Dictionary of PyTorch prediction dataloader parameters."""
+
+    seed: int | None = Field(default_factory=generate_random_seed, gt=0)
+    """Random seed for reproducibility. If not specified, a random seed is generated."""
+
+    @field_validator("axes")
+    @classmethod
+    def axes_valid(cls, axes: str, info: ValidationInfo) -> str:
+        """
+        Validate axes.
+
+        Axes must:
+        - be a combination of 'STCZYX'
+        - not contain duplicates
+        - contain at least 2 contiguous axes: X and Y
+        - contain at most 4 axes
+        - not contain both S and T axes
+
+        Parameters
+        ----------
+        axes : str
+            Axes to validate.
+        info : ValidationInfo
+            Validation information.
+
+        Returns
+        -------
+        str
+            Validated axes.
+
+        Raises
+        ------
+        ValueError
+            If axes are not valid.
+        """
+        # Additional validation for CZI files
+        if info.data["data_type"] == "czi":
+            if not check_czi_axes_validity(axes):
+                raise ValueError(
+                    f"Provided axes '{axes}' are not valid. Axes must be in the "
+                    f"`SC(Z/T)YX` format, where Z or T are optional, and S and C can be"
+                    f" singleton dimensions, but must be provided."
+                )
+        else:
+            check_axes_validity(axes)
+
+        return axes
+
+    @field_validator("in_memory")
+    @classmethod
+    def validate_in_memory_with_data_type(cls, in_memory: bool, info: Any) -> bool:
+        """
+        Validate that in_memory is compatible with data_type.
+
+        `in_memory` can only be True for 'array', 'tiff' and 'custom' data types.
+
+        Parameters
+        ----------
+        in_memory : bool
+            Whether to load data into memory.
+        info : Any
+            Additional information about the field being validated.
+
+        Returns
+        -------
+        bool
+            Validated in_memory value.
+
+        Raises
+        ------
+        ValueError
+            If in_memory is True for unsupported data types.
+        """
+        data_type = info.data.get("data_type")
+
+        if in_memory and data_type not in ("array", "tiff", "custom"):
+            raise ValueError(
+                f"`in_memory` can only be True for 'array', 'tiff' and 'custom' "
+                f"data types, got '{data_type}'. In-memory loading of 'zarr' and "
+                f"'czi' data types is currently not implemented."
+            )
+
+        if not in_memory and data_type == "array":
+            raise ValueError(
+                "`in_memory` must be True for 'array' data type, got False."
+            )
+
+        return in_memory
+
+    @field_validator("channels", mode="before")
+    @classmethod
+    def validate_channels(
+        cls,
+        channels: Sequence[int] | None,
+        info: ValidationInfo,
+    ) -> Sequence[int] | None:
+        """
+        Validate channels.
+
+        Channels must be a sequence of non-negative integers without duplicates. If
+        channels are not `None`, then `C` must be present in the axes.
+
+        Parameters
+        ----------
+        channels : Sequence of int or None
+            Channels to validate.
+        info : ValidationInfo
+            Validation information.
+
+        Returns
+        -------
+        Sequence of int or None
+            Validated channels.
+
+        Raises
+        ------
+        ValueError
+            If channels are not valid.
+        """
+        if channels is not None:
+            if "C" not in info.data["axes"]:
+                raise ValueError(
+                    "Channels were specified but 'C' is not present in the axes."
+                )
+
+            if isinstance(channels, int):
+                channels = [channels]
+
+            if not isinstance(channels, Sequence):
+                raise ValueError("Channels must be a sequence of integers.")
+
+            if len(channels) == 0:
+                return None
+
+            if not all(isinstance(ch, int) for ch in channels):
+                raise ValueError("Channels must be integers.")
+
+            if any(ch < 0 for ch in channels):
+                raise ValueError("Channels must be non-negative integers.")
+
+            if len(set(channels)) != len(channels):
+                raise ValueError("Channels must not contain duplicates.")
+        return channels
+
+    @field_validator("patching")
+    @classmethod
+    def validate_patching_strategy_against_mode(
+        cls, patching: PatchingConfig, info: ValidationInfo
+    ) -> PatchingConfig:
+        """
+        Validate that the patching strategy is compatible with the dataset mode.
+
+        - If mode is `training`, patching strategy must be `random`.
+        - If mode is `validating`, patching must be `fixed_random`.
+        - If mode is `predicting`, patching strategy must be `tiled` or `whole`.
+
+        Parameters
+        ----------
+        patching : PatchingConfig
+            Patching strategy to validate.
+        info : ValidationInfo
+            Validation information.
+
+        Returns
+        -------
+        PatchingConfig
+            Validated patching strategy.
+
+        Raises
+        ------
+        ValueError
+            If the patching strategy is not compatible with the dataset mode.
+        """
+        mode = info.data["mode"]
+        if mode == Mode.TRAINING:
+            if patching.name != "random":
+                raise ValueError(
+                    f"Patching strategy '{patching.name}' is not compatible with "
+                    f"mode '{mode.value}'. Use 'random' for training."
+                )
+        elif mode == Mode.VALIDATING:
+            if patching.name != "fixed_random":
+                raise ValueError(
+                    f"Patching strategy '{patching.name}' is not compatible with "
+                    f"mode '{mode.value}'. Use 'fixed_random' for validating."
+                )
+        elif mode == Mode.PREDICTING:
+            if patching.name not in ["tiled", "whole"]:
+                raise ValueError(
+                    f"Patching strategy '{patching.name}' is not compatible with "
+                    f"mode '{mode.value}'. Use 'tiled' or 'whole' for predicting."
+                )
+        return patching
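
For illustration (not part of the diff), a mismatched mode/strategy pair fails at construction time. `WholePatchingConfig()` taking no required arguments is assumed from its use as a default in `convert_mode` further down:

    import pydantic

    from careamics.config.data.ng_data_config import NGDataConfig
    from careamics.config.data.patching_strategies import WholePatchingConfig

    try:
        NGDataConfig(
            mode="training",
            data_type="tiff",
            axes="SYX",
            patching=WholePatchingConfig(),  # 'whole' is prediction-only
        )
    except pydantic.ValidationError as err:
        print(err)  # Patching strategy 'whole' is not compatible with mode 'training'...
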
+
+    @field_validator("patch_filter", "coord_filter")
+    @classmethod
+    def validate_filters_against_mode(
+        cls,
+        filter_obj: PatchFilterConfig | CoordFilterConfig | None,
+        info: ValidationInfo,
+    ) -> PatchFilterConfig | CoordFilterConfig | None:
+        """
+        Validate that the filters are only used during training.
+
+        Parameters
+        ----------
+        filter_obj : PatchFilterConfig or CoordFilterConfig or None
+            Filter to validate.
+        info : ValidationInfo
+            Validation information.
+
+        Returns
+        -------
+        PatchFilterConfig or CoordFilterConfig or None
+            Validated filter.
+
+        Raises
+        ------
+        ValueError
+            If a filter is used in a mode other than training.
+        """
+        mode = info.data["mode"]
+        if filter_obj is not None and mode != Mode.TRAINING:
+            raise ValueError(
+                f"Filter '{filter_obj.name}' can only be used in 'training' mode, "
+                f"got mode '{mode.value}'."
+            )
+        return filter_obj
+
+    @field_validator("train_dataloader_params")
+    @classmethod
+    def shuffle_train_dataloader(
+        cls, train_dataloader_params: dict[str, Any]
+    ) -> dict[str, Any]:
+        """
+        Validate that "shuffle" is included in the training dataloader params.
+
+        A warning will be raised if `shuffle=False`.
+
+        Parameters
+        ----------
+        train_dataloader_params : dict of {str: Any}
+            The training dataloader parameters.
+
+        Returns
+        -------
+        dict of {str: Any}
+            The validated training dataloader parameters.
+
+        Raises
+        ------
+        ValueError
+            If "shuffle" is not included in the training dataloader params.
+        """
+        if "shuffle" not in train_dataloader_params:
+            raise ValueError(
+                "Value for 'shuffle' was not included in the `train_dataloader_params`."
+            )
+        elif not train_dataloader_params["shuffle"]:
+            warn(
+                "Dataloader parameters include `shuffle=False`; this will be passed to "
+                "the training dataloader and may lead to lower quality results.",
+                stacklevel=1,
+            )
+        return train_dataloader_params
+
+    @model_validator(mode="after")
+    def std_only_with_mean(self: Self) -> Self:
+        """
+        Check that mean and std are either both None, or both specified.
+
+        Returns
+        -------
+        Self
+            Validated data model.
+
+        Raises
+        ------
+        ValueError
+            If std is not None and mean is None.
+        """
+        # check that mean and std are either both None, or both specified
+        if (self.image_means and not self.image_stds) or (
+            self.image_stds and not self.image_means
+        ):
+            raise ValueError(
+                "Mean and std must be either both None, or both specified."
+            )
+
+        elif (self.image_means is not None and self.image_stds is not None) and (
+            len(self.image_means) != len(self.image_stds)
+        ):
+            raise ValueError("Mean and std must be specified for each input channel.")
+
+        if (self.target_means and not self.target_stds) or (
+            self.target_stds and not self.target_means
+        ):
+            raise ValueError(
+                "Mean and std must be either both None, or both specified."
+            )
+
+        elif self.target_means is not None and self.target_stds is not None:
+            if len(self.target_means) != len(self.target_stds):
+                raise ValueError(
+                    "Mean and std must be either both None, or both specified for each "
+                    "target channel."
+                )
+
+        return self
+
+    @model_validator(mode="after")
+    def validate_dimensions(self: Self) -> Self:
+        """
+        Validate 2D/3D dimensions between axes and patch size.
+
+        Returns
+        -------
+        Self
+            Validated data model.
+
+        Raises
+        ------
+        ValueError
+            If the patch size dimension is not compatible with the axes.
+        """
+        # "whole" patching does not have dimensions to validate
+        if not hasattr(self.patching, "patch_size"):
+            return self
+
+        if self.data_type == "czi":
+            # Z and T are both depth axes for CZI data
+            expected_dims = 3 if ("Z" in self.axes or "T" in self.axes) else 2
+            additional_message = " (`Z` and `T` are depth axes for CZI data)"
+        else:
+            expected_dims = 3 if "Z" in self.axes else 2
+            additional_message = ""
+
+        # infer dimension from requested patch size
+        actual_dims = len(self.patching.patch_size)
+        if actual_dims != expected_dims:
+            raise ValueError(
+                f"`patch_size` in `patching` must have {expected_dims} dimensions, "
+                f"got {self.patching.patch_size} with axes {self.axes}"
+                f"{additional_message}."
+            )
+
+        return self
+
+    @model_validator(mode="after")
+    def propagate_seed_to_filters(self: Self) -> Self:
+        """
+        Propagate the main seed to patch and coordinate filters that support seeds.
+
+        This ensures that all filters use the same seed for reproducibility,
+        unless they already have a seed explicitly set.
+
+        Returns
+        -------
+        Self
+            Data model with propagated seeds.
+        """
+        if self.seed is not None:
+            if self.patch_filter is not None:
+                if (
+                    hasattr(self.patch_filter, "seed")
+                    and self.patch_filter.seed is None
+                ):
+                    self.patch_filter.seed = self.seed
+
+            if self.coord_filter is not None:
+                if (
+                    hasattr(self.coord_filter, "seed")
+                    and self.coord_filter.seed is None
+                ):
+                    self.coord_filter.seed = self.seed
+
+        return self
+
+    @model_validator(mode="after")
+    def propagate_seed_to_transforms(self: Self) -> Self:
+        """
+        Propagate the main seed to all transforms that support seeds.
+
+        This ensures that all transforms use the same seed for reproducibility,
+        unless they already have a seed explicitly set.
+
+        Returns
+        -------
+        Self
+            Data model with propagated seeds.
+        """
+        if self.seed is not None:
+            for transform in self.transforms:
+                if hasattr(transform, "seed") and transform.seed is None:
+                    transform.seed = self.seed
+        return self
+
+    @model_validator(mode="after")
+    def propagate_seed_to_patching(self: Self) -> Self:
+        """
+        Propagate the main seed to the patching strategy if it supports seeds.
+
+        This ensures that the patching strategy uses the same seed for reproducibility,
+        unless it already has a seed explicitly set.
+
+        Returns
+        -------
+        Self
+            Data model with propagated seed.
+        """
+        if self.seed is not None:
+            if hasattr(self.patching, "seed") and self.patching.seed is None:
+                self.patching.seed = self.seed
+        return self
+
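A sketch of the effect (assuming, as the `hasattr` checks above suggest, that the patching strategy and transform configs expose an optional `seed` field):

    from careamics.config.data.ng_data_config import NGDataConfig
    from careamics.config.data.patching_strategies import RandomPatchingConfig

    config = NGDataConfig(
        mode="training",
        data_type="tiff",
        axes="SYX",
        patching=RandomPatchingConfig(patch_size=[64, 64]),  # assumed signature
        seed=42,
    )
    # the main seed is copied into sub-configs that did not set their own
    print(config.patching.seed)       # 42
    print(config.transforms[0].seed)  # 42 (the default XYFlipConfig)
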
+    @field_validator("train_dataloader_params", "val_dataloader_params", mode="before")
+    @classmethod
+    def set_default_pin_memory(
+        cls, dataloader_params: dict[str, Any]
+    ) -> dict[str, Any]:
+        """
+        Set default pin_memory for dataloader parameters if not provided.
+
+        - If 'pin_memory' is not set, it defaults to True if CUDA is available.
+
+        Parameters
+        ----------
+        dataloader_params : dict of {str: Any}
+            The dataloader parameters.
+
+        Returns
+        -------
+        dict of {str: Any}
+            The dataloader parameters with pin_memory default applied.
+        """
+        if "pin_memory" not in dataloader_params:
+            import torch
+
+            dataloader_params["pin_memory"] = torch.cuda.is_available()
+        return dataloader_params
+
+    @field_validator("train_dataloader_params", mode="before")
+    @classmethod
+    def set_default_train_workers(
+        cls, dataloader_params: dict[str, Any]
+    ) -> dict[str, Any]:
+        """
+        Set default num_workers for training dataloader if not provided.
+
+        - If 'num_workers' is not set, it defaults to the number of available CPU cores.
+
+        Parameters
+        ----------
+        dataloader_params : dict of {str: Any}
+            The training dataloader parameters.
+
+        Returns
+        -------
+        dict of {str: Any}
+            The dataloader parameters with num_workers default applied.
+        """
+        if "num_workers" not in dataloader_params:
+            # Use 0 workers during tests, otherwise use all available CPU cores
+            if "pytest" in sys.modules:
+                dataloader_params["num_workers"] = 0
+            else:
+                dataloader_params["num_workers"] = os.cpu_count()
+
+        return dataloader_params
+
+    @model_validator(mode="after")
+    def set_val_workers_to_match_train(self: Self) -> Self:
+        """
+        Set validation dataloader num_workers to match training dataloader.
+
+        If num_workers is not specified in val_dataloader_params, it will be set to the
+        same value as train_dataloader_params["num_workers"].
+
+        Returns
+        -------
+        Self
+            Validated data model with synchronized num_workers.
+        """
+        if "num_workers" not in self.val_dataloader_params:
+            self.val_dataloader_params["num_workers"] = self.train_dataloader_params[
+                "num_workers"
+            ]
+        return self
+
+    def __str__(self) -> str:
+        """
+        Pretty string representing the configuration.
+
+        Returns
+        -------
+        str
+            Pretty string.
+        """
+        return pformat(self.model_dump())
+
+    def _update(self, **kwargs: Any) -> None:
+        """
+        Update multiple arguments at once.
+
+        Parameters
+        ----------
+        **kwargs : Any
+            Keyword arguments to update.
+        """
+        self.__dict__.update(kwargs)
+        self.__class__.model_validate(self.__dict__)
+
+    def set_means_and_stds(
+        self,
+        image_means: Union[NDArray, tuple, list, None],
+        image_stds: Union[NDArray, tuple, list, None],
+        target_means: Union[NDArray, tuple, list, None] | None = None,
+        target_stds: Union[NDArray, tuple, list, None] | None = None,
+    ) -> None:
+        """
+        Set mean and standard deviation of the data across channels.
+
+        This method should be used instead of setting the fields directly, as
+        assigning them one at a time would otherwise trigger a validation error.
+
+        Parameters
+        ----------
+        image_means : numpy.ndarray, tuple or list
+            Mean values for normalization.
+        image_stds : numpy.ndarray, tuple or list
+            Standard deviation values for normalization.
+        target_means : numpy.ndarray, tuple or list, optional
+            Target mean values for normalization, by default None.
+        target_stds : numpy.ndarray, tuple or list, optional
+            Target standard deviation values for normalization, by default None.
+        """
+        # make sure we pass a list
+        if image_means is not None:
+            image_means = list(image_means)
+        if image_stds is not None:
+            image_stds = list(image_stds)
+        if target_means is not None:
+            target_means = list(target_means)
+        if target_stds is not None:
+            target_stds = list(target_stds)
+
+        self._update(
+            image_means=image_means,
+            image_stds=image_stds,
+            target_means=target_means,
+            target_stds=target_stds,
+        )
+
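Typical use, as a sketch continuing the training `config` from the earlier examples: statistics computed over the training data are pushed into the config in one call, which keeps the paired mean/std validator satisfied.

    import numpy as np

    config.set_means_and_stds(
        image_means=np.array([128.0], dtype=np.float32),
        image_stds=np.array([32.0], dtype=np.float32),
    )
    # assigning config.image_means alone would fail: with validate_assignment=True,
    # std_only_with_mean re-runs after every single assignment
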
+    def is_3D(self) -> bool:
+        """
+        Check if the data is 3D based on the axes.
+
+        Either "Z" is in the axes and the patching `patch_size` has 3 dimensions, or,
+        for CZI data, "Z" or "T" is in the axes and the patching `patch_size` has 3
+        dimensions.
+
+        This method is used during NGConfiguration validation to cross-check dimensions
+        with the algorithm configuration.
+
+        Returns
+        -------
+        bool
+            True if the data is 3D, False otherwise.
+        """
+        if self.data_type == "czi":
+            return "Z" in self.axes or "T" in self.axes
+        else:
+            return "Z" in self.axes
+
+    # TODO: if switching from a state in which in_memory=True to an incompatible state
+    # an error will be raised. Should that automatically be set to False instead?
+    # TODO `channels=None` is ambiguous: all channels or same channels as in training?
+    # TODO this method could be private and we could have public `to_validation_config`
+    # and `to_prediction_config` methods with appropriate parameters
+    # TODO any use for switching to training mode?
+    def convert_mode(
+        self,
+        new_mode: Literal["validating", "predicting"],
+        new_patch_size: Sequence[int] | None = None,
+        overlap_size: Sequence[int] | None = None,
+        new_batch_size: int | None = None,
+        new_data_type: Literal["array", "tiff", "zarr", "czi", "custom"] | None = None,
+        new_axes: str | None = None,
+        new_channels: Sequence[int] | Literal["all"] | None = None,
+        new_in_memory: bool | None = None,
+        new_dataloader_params: dict[str, Any] | None = None,
+    ) -> NGDataConfig:
+        """
+        Convert a training dataset configuration to a different mode.
+
+        This method is intended to facilitate creating validation or prediction
+        configurations from a training configuration.
+
+        To perform tiled prediction when switching to `predicting` mode, please provide
+        both `new_patch_size` and `overlap_size`. Switching to `predicting` without
+        specifying `new_patch_size` and `overlap_size` will apply the default patching
+        strategy, namely the `whole` image strategy. `new_patch_size` and
+        `overlap_size` are only used when switching to `predicting`.
+
+        `new_channels=None` will retain the same channels as in the current
+        configuration. To select all channels, please specify all channels explicitly
+        or pass `new_channels='all'`.
+
+        New dataloader parameters will be placed in the appropriate dataloader params
+        field depending on the new mode.
+
+        To create a new training configuration, please use
+        `careamics.config.create_ng_data_configuration`.
+
+        This method compares the new parameters with the current ones and raises
+        errors if incompatible changes are requested, such as switching between 2D and
+        3D axes, or changing the number of channels. Incompatibility across parameters
+        may be delegated to Pydantic validation.
+
+        Parameters
+        ----------
+        new_mode : Literal["validating", "predicting"]
+            The new dataset mode, one of `validating` or `predicting`.
+        new_patch_size : Sequence of int, default=None
+            New patch size. If None for `predicting`, uses default whole image
+            strategy.
+        overlap_size : Sequence of int, default=None
+            New overlap size. Necessary when switching to `predicting` with tiled
+            patching.
+        new_batch_size : int, default=None
+            New batch size.
+        new_data_type : Literal['array', 'tiff', 'zarr', 'czi', 'custom'], default=None
+            New data type.
+        new_axes : str, default=None
+            New axes.
+        new_channels : Sequence of int or "all", default=None
+            New channels.
+        new_in_memory : bool, default=None
+            New in_memory value.
+        new_dataloader_params : dict of {str: Any}, default=None
+            New dataloader parameters. These will be placed in the
+            appropriate dataloader params field depending on the new mode.
+
+        Returns
+        -------
+        NGDataConfig
+            New NGDataConfig with the updated mode and parameters.
+
+        Raises
+        ------
+        ValueError
+            If conversion to training mode is requested, or if incompatible changes
+            are requested.
+        """
+        if new_mode == Mode.TRAINING:
+            raise ValueError(
+                "Converting to 'training' mode is not supported. Create a new "
+                "NGDataConfig instead, for instance using "
+                "`create_ng_data_configuration`."
+            )
+        if self.mode != Mode.TRAINING:
+            raise ValueError(
+                f"Converting from mode '{self.mode}' to '{new_mode}' is not supported. "
+                f"Only conversion from 'training' mode is supported."
+            )
+
+        # sanity checks
+        # switching spatial axes
+        if new_axes is not None and ("Z" in new_axes) != (
+            "Z" in self.axes
+        ):  # switching 2D/3D
+            raise ValueError("Cannot switch between 2D and 3D axes.")
+
+        # normalize new_channels parameter to lift ambiguity around `None`
+        # - If None, keep previous parameter
+        # - If "all", select all channels (None value internally)
+        if new_channels is None:
+            new_channels = self.channels
+        elif new_channels == "all":
+            new_channels = None  # all channels
+
+        # switching channels
+        # if switching C axis:
+        # - removing C: original channels can be `None`, singleton or multiple. New
+        #   channels can be `None` if original were `None` or singleton, but not
+        #   multiple.
+        # - adding C: original channels can only be `None`. New channels can be `None`
+        #   (but we warn users that they need to have a singleton C axis in the data),
+        #   or singleton, but not multiple.
+        adding_C_axis = (
+            new_axes is not None and ("C" in new_axes) and ("C" not in self.axes)
+        )
+        removing_C_axis = (
+            new_axes is not None and ("C" not in new_axes) and ("C" in self.axes)
+        )
+        prev_channels_not_singleton = self.channels is not None and (
+            len(self.channels) != 1
+        )
+
+        if adding_C_axis:
+            if new_channels is None:
+                warn(
+                    f"When switching to axes with 'C' (got {new_axes}) from axes "
+                    f"{self.axes}, errors may be raised or degraded performance may be"
+                    f" observed if the channel dimension in the data is not a singleton"
+                    f" dimension. To select a specific channel, use the `new_channels` "
+                    f"parameter (e.g. `new_channels=[1]`).",
+                    stacklevel=1,
+                )
+            elif len(new_channels) != 1:
+                raise ValueError(
+                    f"When switching to axes with 'C' (got {new_axes}) from axes "
+                    f"{self.axes}, a single channel must be selected using the "
+                    f"`new_channels` parameter (got {new_channels})."
+                )
+        elif removing_C_axis and prev_channels_not_singleton:
+            raise ValueError(
+                f"Cannot switch to axes without 'C' (got {new_axes}) from axes "
+                f"{self.axes} when multiple channels were originally specified "
+                f"({self.channels})."
+            )
+
+        # different number of channels
+        if new_channels is not None and self.channels is not None:
+            if len(new_channels) != len(self.channels):
+                raise ValueError(
+                    f"New channels length ({len(new_channels)}) does not match "
+                    f"current channels length ({len(self.channels)})."
+                )
+
+        if self.channels is None and new_channels is not None:
+            warn(
+                f"Switching from all channels (`channels=None`) to specifying channels "
+                f"{new_channels} may lead to errors or degraded performance if "
+                f"{new_channels} are not all channels.",
+                stacklevel=1,
+            )  # Note that in the opposite case, self.channels is kept because
+            # new_channels is None
+
+        # apply default values
+        patching_strategy: PatchingConfig
+        if new_mode == Mode.PREDICTING:
+            if new_patch_size is None:
+                patching_strategy = WholePatchingConfig()
+            else:
+                if overlap_size is None:
+                    raise ValueError(
+                        "When switching to 'predicting' mode with 'tiled' patching, "
+                        "the `overlap_size` parameter must be specified."
+                    )
+                patching_strategy = TiledPatchingConfig(
+                    patch_size=list(new_patch_size), overlaps=list(overlap_size)
+                )
+        else:  # validating
+            assert isinstance(self.patching, RandomPatchingConfig)  # for mypy
+
+            patching_strategy = FixedRandomPatchingConfig(
+                patch_size=(
+                    list(new_patch_size)
+                    if new_patch_size is not None
+                    else self.patching.patch_size
+                ),
+            )
+
+        # create new config
+        model_dict = self.model_dump()
+        model_dict.update(
+            {
+                "mode": new_mode,
+                "patching": patching_strategy,
+                "batch_size": new_batch_size or self.batch_size,
+                "data_type": new_data_type or self.data_type,
+                "axes": new_axes or self.axes,
+                "channels": new_channels if new_channels is not None else self.channels,
+                "in_memory": (
+                    new_in_memory if new_in_memory is not None else self.in_memory
+                ),
+                "val_dataloader_params": (
+                    new_dataloader_params
+                    if new_mode == Mode.VALIDATING and new_dataloader_params is not None
+                    else self.val_dataloader_params
+                ),
+                "pred_dataloader_params": (
+                    new_dataloader_params
+                    if new_mode == Mode.PREDICTING and new_dataloader_params is not None
+                    else self.pred_dataloader_params
+                ),
+            }
+        )
+
+        # remove patch and coord filters when switching to validation or prediction
+        del model_dict["patch_filter"]
+        del model_dict["coord_filter"]
+
+        return NGDataConfig(**model_dict)
+
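A usage sketch (not part of the diff): deriving a tiled-prediction configuration from a training one; tile and overlap sizes are arbitrary examples.

    from careamics.config.data.ng_data_config import NGDataConfig
    from careamics.config.data.patching_strategies import RandomPatchingConfig

    train_config = NGDataConfig(
        mode="training",
        data_type="tiff",
        axes="SYX",
        patching=RandomPatchingConfig(patch_size=[64, 64]),  # assumed signature
    )
    pred_config = train_config.convert_mode(
        "predicting",
        new_patch_size=[128, 128],  # tile size
        overlap_size=[32, 32],      # overlap between tiles
    )
    print(pred_config.patching.name)  # "tiled"
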
+    # def set_3D(self, axes: str, patch_size: list[int]) -> None:
+    #     """
+    #     Set 3D parameters.
+
+    #     Parameters
+    #     ----------
+    #     axes : str
+    #         Axes.
+    #     patch_size : list of int
+    #         Patch size.
+    #     """
+    #     self._update(axes=axes, patch_size=patch_size)