careamics 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of careamics might be problematic; see the registry's advisory for details.

Files changed (155)
  1. careamics/__init__.py +6 -1
  2. careamics/careamist.py +729 -0
  3. careamics/config/__init__.py +39 -0
  4. careamics/config/architectures/__init__.py +17 -0
  5. careamics/config/architectures/architecture_model.py +37 -0
  6. careamics/config/architectures/custom_model.py +162 -0
  7. careamics/config/architectures/lvae_model.py +174 -0
  8. careamics/config/architectures/register_model.py +103 -0
  9. careamics/config/architectures/unet_model.py +118 -0
  10. careamics/config/callback_model.py +123 -0
  11. careamics/config/configuration_factory.py +583 -0
  12. careamics/config/configuration_model.py +604 -0
  13. careamics/config/data_model.py +527 -0
  14. careamics/config/fcn_algorithm_model.py +147 -0
  15. careamics/config/inference_model.py +239 -0
  16. careamics/config/likelihood_model.py +43 -0
  17. careamics/config/nm_model.py +101 -0
  18. careamics/config/optimizer_models.py +187 -0
  19. careamics/config/references/__init__.py +45 -0
  20. careamics/config/references/algorithm_descriptions.py +132 -0
  21. careamics/config/references/references.py +39 -0
  22. careamics/config/support/__init__.py +31 -0
  23. careamics/config/support/supported_activations.py +27 -0
  24. careamics/config/support/supported_algorithms.py +33 -0
  25. careamics/config/support/supported_architectures.py +17 -0
  26. careamics/config/support/supported_data.py +109 -0
  27. careamics/config/support/supported_loggers.py +10 -0
  28. careamics/config/support/supported_losses.py +29 -0
  29. careamics/config/support/supported_optimizers.py +57 -0
  30. careamics/config/support/supported_pixel_manipulations.py +15 -0
  31. careamics/config/support/supported_struct_axis.py +21 -0
  32. careamics/config/support/supported_transforms.py +11 -0
  33. careamics/config/tile_information.py +65 -0
  34. careamics/config/training_model.py +72 -0
  35. careamics/config/transformations/__init__.py +15 -0
  36. careamics/config/transformations/n2v_manipulate_model.py +64 -0
  37. careamics/config/transformations/normalize_model.py +60 -0
  38. careamics/config/transformations/transform_model.py +45 -0
  39. careamics/config/transformations/xy_flip_model.py +43 -0
  40. careamics/config/transformations/xy_random_rotate90_model.py +35 -0
  41. careamics/config/vae_algorithm_model.py +171 -0
  42. careamics/config/validators/__init__.py +5 -0
  43. careamics/config/validators/validator_utils.py +101 -0
  44. careamics/conftest.py +39 -0
  45. careamics/dataset/__init__.py +17 -0
  46. careamics/dataset/dataset_utils/__init__.py +19 -0
  47. careamics/dataset/dataset_utils/dataset_utils.py +101 -0
  48. careamics/dataset/dataset_utils/file_utils.py +141 -0
  49. careamics/dataset/dataset_utils/iterate_over_files.py +83 -0
  50. careamics/dataset/dataset_utils/running_stats.py +186 -0
  51. careamics/dataset/in_memory_dataset.py +310 -0
  52. careamics/dataset/in_memory_pred_dataset.py +88 -0
  53. careamics/dataset/in_memory_tiled_pred_dataset.py +129 -0
  54. careamics/dataset/iterable_dataset.py +295 -0
  55. careamics/dataset/iterable_pred_dataset.py +122 -0
  56. careamics/dataset/iterable_tiled_pred_dataset.py +140 -0
  57. careamics/dataset/patching/__init__.py +1 -0
  58. careamics/dataset/patching/patching.py +299 -0
  59. careamics/dataset/patching/random_patching.py +201 -0
  60. careamics/dataset/patching/sequential_patching.py +212 -0
  61. careamics/dataset/patching/validate_patch_dimension.py +64 -0
  62. careamics/dataset/tiling/__init__.py +10 -0
  63. careamics/dataset/tiling/collate_tiles.py +33 -0
  64. careamics/dataset/tiling/lvae_tiled_patching.py +282 -0
  65. careamics/dataset/tiling/tiled_patching.py +164 -0
  66. careamics/dataset/zarr_dataset.py +151 -0
  67. careamics/file_io/__init__.py +15 -0
  68. careamics/file_io/read/__init__.py +12 -0
  69. careamics/file_io/read/get_func.py +56 -0
  70. careamics/file_io/read/tiff.py +58 -0
  71. careamics/file_io/read/zarr.py +60 -0
  72. careamics/file_io/write/__init__.py +15 -0
  73. careamics/file_io/write/get_func.py +63 -0
  74. careamics/file_io/write/tiff.py +40 -0
  75. careamics/lightning/__init__.py +18 -0
  76. careamics/lightning/callbacks/__init__.py +11 -0
  77. careamics/lightning/callbacks/hyperparameters_callback.py +49 -0
  78. careamics/lightning/callbacks/prediction_writer_callback/__init__.py +20 -0
  79. careamics/lightning/callbacks/prediction_writer_callback/file_path_utils.py +56 -0
  80. careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +233 -0
  81. careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +398 -0
  82. careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +215 -0
  83. careamics/lightning/callbacks/progress_bar_callback.py +90 -0
  84. careamics/lightning/lightning_module.py +632 -0
  85. careamics/lightning/predict_data_module.py +333 -0
  86. careamics/lightning/train_data_module.py +680 -0
  87. careamics/losses/__init__.py +15 -0
  88. careamics/losses/fcn/__init__.py +1 -0
  89. careamics/losses/fcn/losses.py +98 -0
  90. careamics/losses/loss_factory.py +155 -0
  91. careamics/losses/lvae/__init__.py +1 -0
  92. careamics/losses/lvae/loss_utils.py +83 -0
  93. careamics/losses/lvae/losses.py +445 -0
  94. careamics/lvae_training/__init__.py +0 -0
  95. careamics/lvae_training/dataset/__init__.py +0 -0
  96. careamics/lvae_training/dataset/data_utils.py +701 -0
  97. careamics/lvae_training/dataset/lc_dataset.py +259 -0
  98. careamics/lvae_training/dataset/lc_dataset_config.py +13 -0
  99. careamics/lvae_training/dataset/vae_data_config.py +179 -0
  100. careamics/lvae_training/dataset/vae_dataset.py +1054 -0
  101. careamics/lvae_training/eval_utils.py +905 -0
  102. careamics/lvae_training/get_config.py +84 -0
  103. careamics/lvae_training/lightning_module.py +701 -0
  104. careamics/lvae_training/metrics.py +214 -0
  105. careamics/lvae_training/train_lvae.py +342 -0
  106. careamics/lvae_training/train_utils.py +121 -0
  107. careamics/model_io/__init__.py +7 -0
  108. careamics/model_io/bioimage/__init__.py +11 -0
  109. careamics/model_io/bioimage/_readme_factory.py +121 -0
  110. careamics/model_io/bioimage/bioimage_utils.py +52 -0
  111. careamics/model_io/bioimage/model_description.py +327 -0
  112. careamics/model_io/bmz_io.py +246 -0
  113. careamics/model_io/model_io_utils.py +95 -0
  114. careamics/models/__init__.py +5 -0
  115. careamics/models/activation.py +39 -0
  116. careamics/models/layers.py +493 -0
  117. careamics/models/lvae/__init__.py +3 -0
  118. careamics/models/lvae/layers.py +1998 -0
  119. careamics/models/lvae/likelihoods.py +364 -0
  120. careamics/models/lvae/lvae.py +901 -0
  121. careamics/models/lvae/noise_models.py +541 -0
  122. careamics/models/lvae/utils.py +395 -0
  123. careamics/models/model_factory.py +67 -0
  124. careamics/models/unet.py +443 -0
  125. careamics/prediction_utils/__init__.py +10 -0
  126. careamics/prediction_utils/lvae_prediction.py +158 -0
  127. careamics/prediction_utils/lvae_tiling_manager.py +362 -0
  128. careamics/prediction_utils/prediction_outputs.py +135 -0
  129. careamics/prediction_utils/stitch_prediction.py +112 -0
  130. careamics/transforms/__init__.py +20 -0
  131. careamics/transforms/compose.py +107 -0
  132. careamics/transforms/n2v_manipulate.py +146 -0
  133. careamics/transforms/normalize.py +243 -0
  134. careamics/transforms/pixel_manipulation.py +407 -0
  135. careamics/transforms/struct_mask_parameters.py +20 -0
  136. careamics/transforms/transform.py +24 -0
  137. careamics/transforms/tta.py +88 -0
  138. careamics/transforms/xy_flip.py +123 -0
  139. careamics/transforms/xy_random_rotate90.py +101 -0
  140. careamics/utils/__init__.py +19 -0
  141. careamics/utils/autocorrelation.py +40 -0
  142. careamics/utils/base_enum.py +60 -0
  143. careamics/utils/context.py +66 -0
  144. careamics/utils/logging.py +322 -0
  145. careamics/utils/metrics.py +188 -0
  146. careamics/utils/path_utils.py +26 -0
  147. careamics/utils/ram.py +15 -0
  148. careamics/utils/receptive_field.py +108 -0
  149. careamics/utils/torch_utils.py +127 -0
  150. careamics-0.0.3.dist-info/METADATA +78 -0
  151. careamics-0.0.3.dist-info/RECORD +154 -0
  152. {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/WHEEL +1 -1
  153. {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/licenses/LICENSE +1 -1
  154. careamics-0.0.1.dist-info/METADATA +0 -46
  155. careamics-0.0.1.dist-info/RECORD +0 -6
careamics/lvae_training/dataset/lc_dataset.py
@@ -0,0 +1,259 @@
+ """
+ A place for Datasets and Dataloaders.
+ """
+
+ from typing import Tuple, Union
+
+ import numpy as np
+ from skimage.transform import resize
+
+ from .lc_dataset_config import LCVaeDatasetConfig
+ from .vae_dataset import MultiChDloader
+
+
+ class LCMultiChDloader(MultiChDloader):
+
+     def __init__(
+         self,
+         data_config: LCVaeDatasetConfig,
+         fpath: str,
+         val_fraction=None,
+         test_fraction=None,
+     ):
+         """
+         Args:
+             num_scales: The number of resolutions at which we want the input. Note that the target is formed at the
+                 highest resolution.
+         """
+         self._padding_kwargs = (
+             data_config.padding_kwargs  # mode=padding_mode, constant_values=constant_value
+         )
+         self._uncorrelated_channel_probab = data_config.uncorrelated_channel_probab
+
+         if data_config.overlapping_padding_kwargs is not None:
+             assert (
+                 self._padding_kwargs == data_config.overlapping_padding_kwargs
+             ), "During evaluation, overlapping_padding_kwargs should be the same as padding_kwargs, since overlapping_padding_kwargs is only used when it is not None."
+
+         else:
+             overlapping_padding_kwargs = data_config.padding_kwargs
+
+         super().__init__(
+             data_config, fpath, val_fraction=val_fraction, test_fraction=test_fraction
+         )
+         self.num_scales = data_config.num_scales
+         assert self.num_scales is not None
+         self._scaled_data = [self._data]
+         self._scaled_noise_data = [self._noise_data]
+
+         assert isinstance(self.num_scales, int) and self.num_scales >= 1
+         assert isinstance(self._padding_kwargs, dict)
+         assert "mode" in self._padding_kwargs
+
+         for _ in range(1, self.num_scales):
+             shape = self._scaled_data[-1].shape
+             assert len(shape) == 4
+             new_shape = (shape[0], shape[1] // 2, shape[2] // 2, shape[3])
+             ds_data = resize(
+                 self._scaled_data[-1].astype(np.float32), new_shape
+             ).astype(self._scaled_data[-1].dtype)
+             # NOTE: These asserts are important. The resize method expects np.float32; otherwise, one gets weird results.
+             assert (
+                 ds_data.max() / self._scaled_data[-1].max() < 5
+             ), "Downsampled image should not have very different values"
+             assert (
+                 ds_data.max() / self._scaled_data[-1].max() > 0.2
+             ), "Downsampled image should not have very different values"
+
+             self._scaled_data.append(ds_data)
+             # do the same for noise
+             if self._noise_data is not None:
+                 noise_data = resize(self._scaled_noise_data[-1], new_shape)
+                 self._scaled_noise_data.append(noise_data)
+
+     def reduce_data(
+         self, t_list=None, h_start=None, h_end=None, w_start=None, w_end=None
+     ):
+         assert t_list is not None
+         assert h_start is None
+         assert h_end is None
+         assert w_start is None
+         assert w_end is None
+
+         self._data = self._data[t_list].copy()
+         self._scaled_data = [
+             self._scaled_data[i][t_list].copy() for i in range(len(self._scaled_data))
+         ]
+
+         if self._noise_data is not None:
+             self._noise_data = self._noise_data[t_list].copy()
+             self._scaled_noise_data = [
+                 self._scaled_noise_data[i][t_list].copy()
+                 for i in range(len(self._scaled_noise_data))
+             ]
+
+         self.N = len(t_list)
+         self.set_img_sz(self._img_sz, self._grid_sz)
+         print(
+             f"[{self.__class__.__name__}] Data reduced. New data shape: {self._data.shape}"
+         )
+
+     def _init_msg(self):
+         msg = super()._init_msg()
+         msg += f" Pad:{self._padding_kwargs}"
+         if self._uncorrelated_channels:
+             msg += f" UncorrChProbab:{self._uncorrelated_channel_probab}"
+         return msg
+
+     def _load_scaled_img(
+         self, scaled_index, index: Union[int, Tuple[int, int]]
+     ) -> Tuple[np.ndarray, np.ndarray]:
+         if isinstance(index, int):
+             idx = index
+         else:
+             idx, _ = index
+
+         # tidx = self.idx_manager.get_t(idx)
+         patch_loc_list = self.idx_manager.get_patch_location_from_dataset_idx(idx)
+         nidx = patch_loc_list[0]
+
+         imgs = self._scaled_data[scaled_index][nidx]
+         imgs = tuple([imgs[None, ..., i] for i in range(imgs.shape[-1])])
+         if self._noise_data is not None:
+             noisedata = self._scaled_noise_data[scaled_index][nidx]
+             noise = tuple([noisedata[None, ..., i] for i in range(noisedata.shape[-1])])
+             factor = np.sqrt(2) if self._input_is_sum else 1.0
+             imgs = tuple([img + noise[0] * factor for img in imgs])
+         return imgs
+
+     def _crop_img(self, img: np.ndarray, patch_start_loc: Tuple):
+         """
+         Here, h_start and w_start could be negative, which simply means we need to pick the content from 0. So,
+         the cropped image will be smaller than self._img_sz * self._img_sz.
+         """
+         max_len_vals = list(self.idx_manager.data_shape[1:-1])
+         max_len_vals[-2:] = img.shape[-2:]
+         return self._crop_img_with_padding(
+             img, patch_start_loc, max_len_vals=max_len_vals
+         )
+
+     def _get_img(self, index: int):
+         """
+         Returns the primary patch along with low resolution patches centered on the primary patch.
+         """
+         # noise_tuples is populated when there is synthetic noise in training;
+         # it should have a similar type of noise to the noise model.
+         # Starting with MicroSplit, drop the noise and use it as an augmentation instead, if necessary.
+         img_tuples, noise_tuples = self._load_img(index)
+         assert self._img_sz is not None
+         h, w = img_tuples[0].shape[-2:]
+         if self._enable_random_cropping:
+             patch_start_loc = self._get_random_hw(h, w)
+             if self._5Ddata:
+                 patch_start_loc = (
+                     np.random.choice(img_tuples[0].shape[-3] - self._depth3D),
+                 ) + patch_start_loc
+         else:
+             patch_start_loc = self._get_deterministic_loc(index)
+
+         # The LC logic is located here: the function crops the image at the highest resolution.
+         cropped_img_tuples = [
+             self._crop_flip_img(img, patch_start_loc, False, False)
+             for img in img_tuples
+         ]
+         cropped_noise_tuples = [
+             self._crop_flip_img(noise, patch_start_loc, False, False)
+             for noise in noise_tuples
+         ]
+         patch_start_loc = list(patch_start_loc)
+         h_start, w_start = patch_start_loc[-2], patch_start_loc[-1]
+         h_center = h_start + self._img_sz // 2
+         w_center = w_start + self._img_sz // 2
+         allres_versions = {
+             i: [cropped_img_tuples[i]] for i in range(len(cropped_img_tuples))
+         }
+         for scale_idx in range(1, self.num_scales):
+             # Load the image at the next lower resolution.
+             scaled_img_tuples = self._load_scaled_img(scale_idx, index)
+
+             h_center = h_center // 2
+             w_center = w_center // 2
+
+             h_start = h_center - self._img_sz // 2
+             w_start = w_center - self._img_sz // 2
+             patch_start_loc[-2:] = [h_start, w_start]
+             scaled_cropped_img_tuples = [
+                 self._crop_flip_img(img, patch_start_loc, False, False)
+                 for img in scaled_img_tuples
+             ]
+             for ch_idx in range(len(img_tuples)):
+                 allres_versions[ch_idx].append(scaled_cropped_img_tuples[ch_idx])
+
+         output_img_tuples = tuple(
+             [
+                 np.concatenate(allres_versions[ch_idx])
+                 for ch_idx in range(len(img_tuples))
+             ]
+         )
+         return output_img_tuples, cropped_noise_tuples
+
+     def __getitem__(self, index: Union[int, Tuple[int, int]]):
+         img_tuples, noise_tuples = self._get_img(index)
+         if self._uncorrelated_channels:
+             assert (
+                 self._input_idx is None
+             ), "Uncorrelated channels are not implemented when there is a separate input channel."
+             if np.random.rand() < self._uncorrelated_channel_probab:
+                 img_tuples_new = [None] * len(img_tuples)
+                 img_tuples_new[0] = img_tuples[0]
+                 for i in range(1, len(img_tuples)):
+                     new_index = np.random.randint(len(self))
+                     img_tuples_tmp, _ = self._get_img(new_index)
+                     img_tuples_new[i] = img_tuples_tmp[i]
+                 img_tuples = img_tuples_new
+
+         if self._is_train:
+             if self._empty_patch_replacement_enabled:
+                 if np.random.rand() < self._empty_patch_replacement_probab:
+                     img_tuples = self.replace_with_empty_patch(img_tuples)
+
+         if self._enable_rotation:
+             img_tuples, noise_tuples = self._rotate(img_tuples, noise_tuples)
+
+         # Add noise to the input; if noise is present, combine it with the image.
+         # The factor keeps the computed input from having too much noise, since it is the average of two Gaussians.
+         if len(noise_tuples) > 0:
+             factor = np.sqrt(2) if self._input_is_sum else 1.0
+             input_tuples = []
+             for x in img_tuples:
+                 # NOTE: The other LC levels already have noise added, so noise only needs to be added at the highest resolution.
+                 x[0] = x[0] + noise_tuples[0] * factor
+                 input_tuples.append(x)
+         else:
+             input_tuples = img_tuples
+
+         # Compute the input by summing / averaging the channels.
+         # Alpha is the weight applied to the channels when combining them;
+         # how to sample alpha is still under research.
+         inp, alpha = self._compute_input(input_tuples)
+         target_tuples = [img[:1] for img in img_tuples]
+         # add noise to target.
+         if len(noise_tuples) >= 1:
+             target_tuples = [
+                 x + noise for x, noise in zip(target_tuples, noise_tuples[1:])
+             ]
+
+         target = self._compute_target(target_tuples, alpha)
+
+         output = [inp, target]
+
+         if self._return_alpha:
+             output.append(alpha)
+
+         if isinstance(index, int):
+             return tuple(output)
+
+         _, grid_size = index
+         output.append(grid_size)
+         return tuple(output)
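
For orientation, here is a minimal sketch of how this lateral-context loader might be constructed. The class and field names come from the diff above; the file path, data type, and values are hypothetical, and the data on disk must be in a layout that MultiChDloader can read.

from careamics.lvae_training.dataset.lc_dataset import LCMultiChDloader
from careamics.lvae_training.dataset.lc_dataset_config import LCVaeDatasetConfig
from careamics.lvae_training.dataset.vae_data_config import DataSplitType, DataType

# Hypothetical config: 64x64 patches with three lateral-context resolutions.
# multiscale_lowres_count must be set so that the computed padding_kwargs
# contains a "mode" key, which __init__ asserts on.
config = LCVaeDatasetConfig(
    data_type=DataType.SeparateTiffData,
    datasplit_type=DataSplitType.Train,
    image_size=64,
    num_scales=3,
    multiscale_lowres_count=3,
)

dataset = LCMultiChDloader(
    config,
    fpath="/path/to/data.tif",  # placeholder path
    val_fraction=0.1,
    test_fraction=0.1,
)
inp, target = dataset[0][:2]  # __getitem__ returns (input, target, ...) tuples
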
careamics/lvae_training/dataset/lc_dataset_config.py
@@ -0,0 +1,13 @@
+ from typing import Optional
+
+ from pydantic import ConfigDict, computed_field
+
+ from careamics.lvae_training.dataset.vae_data_config import VaeDatasetConfig
+
+
+ class LCVaeDatasetConfig(VaeDatasetConfig):
+     model_config = ConfigDict(validate_assignment=True)
+
+     num_scales: int = 1
+     """The number of resolutions at which we want the input. The target is formed at the
+     highest resolution."""
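
Since the config sets validate_assignment=True, pydantic re-validates assignments made after construction; a small illustration (field values hypothetical):

cfg = LCVaeDatasetConfig(data_type=None, image_size=64, num_scales=2)
cfg.num_scales = 4  # re-validated against the int annotation on assignment
# cfg.num_scales = "four"  # would raise pydantic.ValidationError
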
careamics/lvae_training/dataset/vae_data_config.py
@@ -0,0 +1,179 @@
+ from typing import Any, Optional
+ from enum import Enum
+
+ from pydantic import BaseModel, ConfigDict, computed_field
+
+
+ # TODO: get rid of unnecessary enums
+ class DataType(Enum):
+     MNIST = 0
+     Places365 = 1
+     NotMNIST = 2
+     OptiMEM100_014 = 3
+     CustomSinosoid = 4
+     Prevedel_EMBL = 5
+     AllenCellMito = 6
+     SeparateTiffData = 7
+     CustomSinosoidThreeCurve = 8
+     SemiSupBloodVesselsEMBL = 9
+     Pavia2 = 10
+     Pavia2VanillaSplitting = 11
+     ExpansionMicroscopyMitoTub = 12
+     ShroffMitoEr = 13
+     HTIba1Ki67 = 14
+     BSD68 = 15
+     BioSR_MRC = 16
+     TavernaSox2Golgi = 17
+     Dao3Channel = 18
+     ExpMicroscopyV2 = 19
+     Dao3ChannelWithInput = 20
+     TavernaSox2GolgiV2 = 21
+     TwoDset = 22
+     PredictedTiffData = 23
+     Pavia3SeqData = 24
+     # Here, we have 16 splitting tasks.
+     NicolaData = 25
+
+
+ class DataSplitType(Enum):
+     All = 0
+     Train = 1
+     Val = 2
+     Test = 3
+
+
+ class GridAlignement(Enum):
+     """
+     A patch is formed by padding the grid with content. If the grids are 'Center' aligned, then padding is done equally on all 4 sides.
+     On the other hand, if grids are 'LeftTop' aligned, padding is done on the right and bottom ends of the grid.
+     In the former case, one needs (patch_size - grid_size)//2 of content on the right end of the frame.
+     In the latter case, one needs patch_size - grid_size of content on the right end of the frame.
+     """
+
+     LeftTop = 0
+     Center = 1
+
+
+ # TODO: for all bool params check if they are taking different values in Disentangle repo
+ # TODO: check if any bool logic can be removed
+ class VaeDatasetConfig(BaseModel):
+     model_config = ConfigDict(validate_assignment=True)
+
+     data_type: Optional[DataType]
+     """Type of the dataset, should be one of DataType"""
+
+     depth3D: Optional[int] = 1
+     """Number of slices in 3D. If data is 2D depth3D is equal to 1"""
+
+     datasplit_type: Optional[DataSplitType] = None
+     """Whether to return training, validation or test split, should be one of
+     DataSplitType"""
+
+     num_channels: Optional[int] = 2
+     """Number of channels in the input"""
+
+     # TODO: remove ch*_fname parameters, should be parsed automatically from a name list
+     ch1_fname: Optional[str] = None
+     ch2_fname: Optional[str] = None
+     ch_input_fname: Optional[str] = None
+
+     input_is_sum: Optional[bool] = False
+     """Whether the input is the sum or average of channels"""
+
+     input_idx: Optional[int] = None
+     """Index of the channel where the input is stored in the data"""
+
+     target_idx_list: Optional[list[int]] = None
+     """Indices of the channels where the targets are stored in the data"""
+
+     # TODO: where are these used?
+     start_alpha: Optional[Any] = None
+     end_alpha: Optional[Any] = None
+
+     image_size: int
+     """Size of one patch of data"""
+
+     grid_size: Optional[int] = None
+     """Frame is divided into square grids of this size. A patch of size `image_size`,
+     centered on a grid square, is returned. Grid size is not used in training; it is
+     used only during val / test, where it controls the overlap of the patches."""
+
+     empty_patch_replacement_enabled: Optional[bool] = False
+     """Whether to replace the content of one of the channels
+     with background with given probability"""
+     empty_patch_replacement_channel_idx: Optional[Any] = None
+     empty_patch_replacement_probab: Optional[Any] = None
+     empty_patch_max_val_threshold: Optional[Any] = None
+
+     uncorrelated_channels: Optional[bool] = False
+     """Replace the content in one of the channels with given probability to make
+     channel content 'uncorrelated'"""
+     uncorrelated_channel_probab: Optional[float] = 0.5
+
+     poisson_noise_factor: Optional[float] = -1
+     """The added Poisson noise factor"""
+
+     synthetic_gaussian_scale: Optional[float] = 0.1
+
+     # TODO: set to True in training code, recheck
+     input_has_dependant_noise: Optional[bool] = False
+
+     # TODO: sometimes max_val differs between runs with fixed seeds with noise enabled
+     enable_gaussian_noise: Optional[bool] = False
+     """Whether to enable Gaussian noise"""
+
+     # TODO: is this parameter used?
+     allow_generation: bool = False
+
+     # TODO: both used in IndexSwitcher, ensure correct passing
+     training_validtarget_fraction: Any = None
+     deterministic_grid: Any = None
+
+     # TODO: why is this not used?
+     enable_rotation_aug: Optional[bool] = False
+
+     grid_alignment: GridAlignement = GridAlignement.LeftTop
+
+     max_val: Optional[float] = None
+     """Maximum data in the dataset. Is calculated for train split, and should be
+     externally set for val and test splits."""
+
+     trim_boundary: Optional[bool] = True
+     """Whether to trim boundary of the image"""
+
+     overlapping_padding_kwargs: Any = None
+     """Parameters for np.pad method"""
+
+     # TODO: remove this parameter, controls debug print
+     print_vars: Optional[bool] = False
+
+     # Hard-coded parameters (used to be in the config file)
+     normalized_input: bool = True
+     """If this is set to true, then one mean and stdev is used
+     for both channels. Otherwise, two different mean and stdev are used."""
+     use_one_mu_std: Optional[bool] = True
+
+     # TODO: is this parameter used?
+     train_aug_rotate: Optional[bool] = False
+     enable_random_cropping: Optional[bool] = True
+
+     # TODO: not used?
+     multiscale_lowres_count: Optional[int] = None
+
+     @computed_field
+     @property
+     def padding_kwargs(self) -> dict:
+         padding_kwargs = {}
+         if self.multiscale_lowres_count is not None:
+             # Default padding attributes for multiscale (lateral-context) inputs.
+             padding_kwargs["mode"] = "constant"
+             padding_kwargs["constant_values"] = 0
+         return padding_kwargs
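
As written, the computed padding_kwargs field yields constant padding only when multiscale_lowres_count is set, and an empty dict otherwise; a short illustration (field values hypothetical):

from careamics.lvae_training.dataset.vae_data_config import VaeDatasetConfig

cfg = VaeDatasetConfig(data_type=None, image_size=64)
print(cfg.padding_kwargs)  # {}

cfg = VaeDatasetConfig(data_type=None, image_size=64, multiscale_lowres_count=3)
print(cfg.padding_kwargs)  # {'mode': 'constant', 'constant_values': 0}
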