careamics 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of careamics might be problematic.

Files changed (155)
  1. careamics/__init__.py +6 -1
  2. careamics/careamist.py +729 -0
  3. careamics/config/__init__.py +39 -0
  4. careamics/config/architectures/__init__.py +17 -0
  5. careamics/config/architectures/architecture_model.py +37 -0
  6. careamics/config/architectures/custom_model.py +162 -0
  7. careamics/config/architectures/lvae_model.py +174 -0
  8. careamics/config/architectures/register_model.py +103 -0
  9. careamics/config/architectures/unet_model.py +118 -0
  10. careamics/config/callback_model.py +123 -0
  11. careamics/config/configuration_factory.py +583 -0
  12. careamics/config/configuration_model.py +604 -0
  13. careamics/config/data_model.py +527 -0
  14. careamics/config/fcn_algorithm_model.py +147 -0
  15. careamics/config/inference_model.py +239 -0
  16. careamics/config/likelihood_model.py +43 -0
  17. careamics/config/nm_model.py +101 -0
  18. careamics/config/optimizer_models.py +187 -0
  19. careamics/config/references/__init__.py +45 -0
  20. careamics/config/references/algorithm_descriptions.py +132 -0
  21. careamics/config/references/references.py +39 -0
  22. careamics/config/support/__init__.py +31 -0
  23. careamics/config/support/supported_activations.py +27 -0
  24. careamics/config/support/supported_algorithms.py +33 -0
  25. careamics/config/support/supported_architectures.py +17 -0
  26. careamics/config/support/supported_data.py +109 -0
  27. careamics/config/support/supported_loggers.py +10 -0
  28. careamics/config/support/supported_losses.py +29 -0
  29. careamics/config/support/supported_optimizers.py +57 -0
  30. careamics/config/support/supported_pixel_manipulations.py +15 -0
  31. careamics/config/support/supported_struct_axis.py +21 -0
  32. careamics/config/support/supported_transforms.py +11 -0
  33. careamics/config/tile_information.py +65 -0
  34. careamics/config/training_model.py +72 -0
  35. careamics/config/transformations/__init__.py +15 -0
  36. careamics/config/transformations/n2v_manipulate_model.py +64 -0
  37. careamics/config/transformations/normalize_model.py +60 -0
  38. careamics/config/transformations/transform_model.py +45 -0
  39. careamics/config/transformations/xy_flip_model.py +43 -0
  40. careamics/config/transformations/xy_random_rotate90_model.py +35 -0
  41. careamics/config/vae_algorithm_model.py +171 -0
  42. careamics/config/validators/__init__.py +5 -0
  43. careamics/config/validators/validator_utils.py +101 -0
  44. careamics/conftest.py +39 -0
  45. careamics/dataset/__init__.py +17 -0
  46. careamics/dataset/dataset_utils/__init__.py +19 -0
  47. careamics/dataset/dataset_utils/dataset_utils.py +101 -0
  48. careamics/dataset/dataset_utils/file_utils.py +141 -0
  49. careamics/dataset/dataset_utils/iterate_over_files.py +83 -0
  50. careamics/dataset/dataset_utils/running_stats.py +186 -0
  51. careamics/dataset/in_memory_dataset.py +310 -0
  52. careamics/dataset/in_memory_pred_dataset.py +88 -0
  53. careamics/dataset/in_memory_tiled_pred_dataset.py +129 -0
  54. careamics/dataset/iterable_dataset.py +295 -0
  55. careamics/dataset/iterable_pred_dataset.py +122 -0
  56. careamics/dataset/iterable_tiled_pred_dataset.py +140 -0
  57. careamics/dataset/patching/__init__.py +1 -0
  58. careamics/dataset/patching/patching.py +299 -0
  59. careamics/dataset/patching/random_patching.py +201 -0
  60. careamics/dataset/patching/sequential_patching.py +212 -0
  61. careamics/dataset/patching/validate_patch_dimension.py +64 -0
  62. careamics/dataset/tiling/__init__.py +10 -0
  63. careamics/dataset/tiling/collate_tiles.py +33 -0
  64. careamics/dataset/tiling/lvae_tiled_patching.py +282 -0
  65. careamics/dataset/tiling/tiled_patching.py +164 -0
  66. careamics/dataset/zarr_dataset.py +151 -0
  67. careamics/file_io/__init__.py +15 -0
  68. careamics/file_io/read/__init__.py +12 -0
  69. careamics/file_io/read/get_func.py +56 -0
  70. careamics/file_io/read/tiff.py +58 -0
  71. careamics/file_io/read/zarr.py +60 -0
  72. careamics/file_io/write/__init__.py +15 -0
  73. careamics/file_io/write/get_func.py +63 -0
  74. careamics/file_io/write/tiff.py +40 -0
  75. careamics/lightning/__init__.py +18 -0
  76. careamics/lightning/callbacks/__init__.py +11 -0
  77. careamics/lightning/callbacks/hyperparameters_callback.py +49 -0
  78. careamics/lightning/callbacks/prediction_writer_callback/__init__.py +20 -0
  79. careamics/lightning/callbacks/prediction_writer_callback/file_path_utils.py +56 -0
  80. careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +233 -0
  81. careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +398 -0
  82. careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +215 -0
  83. careamics/lightning/callbacks/progress_bar_callback.py +90 -0
  84. careamics/lightning/lightning_module.py +632 -0
  85. careamics/lightning/predict_data_module.py +333 -0
  86. careamics/lightning/train_data_module.py +680 -0
  87. careamics/losses/__init__.py +15 -0
  88. careamics/losses/fcn/__init__.py +1 -0
  89. careamics/losses/fcn/losses.py +98 -0
  90. careamics/losses/loss_factory.py +155 -0
  91. careamics/losses/lvae/__init__.py +1 -0
  92. careamics/losses/lvae/loss_utils.py +83 -0
  93. careamics/losses/lvae/losses.py +445 -0
  94. careamics/lvae_training/__init__.py +0 -0
  95. careamics/lvae_training/dataset/__init__.py +0 -0
  96. careamics/lvae_training/dataset/data_utils.py +701 -0
  97. careamics/lvae_training/dataset/lc_dataset.py +259 -0
  98. careamics/lvae_training/dataset/lc_dataset_config.py +13 -0
  99. careamics/lvae_training/dataset/vae_data_config.py +179 -0
  100. careamics/lvae_training/dataset/vae_dataset.py +1054 -0
  101. careamics/lvae_training/eval_utils.py +905 -0
  102. careamics/lvae_training/get_config.py +84 -0
  103. careamics/lvae_training/lightning_module.py +701 -0
  104. careamics/lvae_training/metrics.py +214 -0
  105. careamics/lvae_training/train_lvae.py +342 -0
  106. careamics/lvae_training/train_utils.py +121 -0
  107. careamics/model_io/__init__.py +7 -0
  108. careamics/model_io/bioimage/__init__.py +11 -0
  109. careamics/model_io/bioimage/_readme_factory.py +121 -0
  110. careamics/model_io/bioimage/bioimage_utils.py +52 -0
  111. careamics/model_io/bioimage/model_description.py +327 -0
  112. careamics/model_io/bmz_io.py +246 -0
  113. careamics/model_io/model_io_utils.py +95 -0
  114. careamics/models/__init__.py +5 -0
  115. careamics/models/activation.py +39 -0
  116. careamics/models/layers.py +493 -0
  117. careamics/models/lvae/__init__.py +3 -0
  118. careamics/models/lvae/layers.py +1998 -0
  119. careamics/models/lvae/likelihoods.py +364 -0
  120. careamics/models/lvae/lvae.py +901 -0
  121. careamics/models/lvae/noise_models.py +541 -0
  122. careamics/models/lvae/utils.py +395 -0
  123. careamics/models/model_factory.py +67 -0
  124. careamics/models/unet.py +443 -0
  125. careamics/prediction_utils/__init__.py +10 -0
  126. careamics/prediction_utils/lvae_prediction.py +158 -0
  127. careamics/prediction_utils/lvae_tiling_manager.py +362 -0
  128. careamics/prediction_utils/prediction_outputs.py +135 -0
  129. careamics/prediction_utils/stitch_prediction.py +112 -0
  130. careamics/transforms/__init__.py +20 -0
  131. careamics/transforms/compose.py +107 -0
  132. careamics/transforms/n2v_manipulate.py +146 -0
  133. careamics/transforms/normalize.py +243 -0
  134. careamics/transforms/pixel_manipulation.py +407 -0
  135. careamics/transforms/struct_mask_parameters.py +20 -0
  136. careamics/transforms/transform.py +24 -0
  137. careamics/transforms/tta.py +88 -0
  138. careamics/transforms/xy_flip.py +123 -0
  139. careamics/transforms/xy_random_rotate90.py +101 -0
  140. careamics/utils/__init__.py +19 -0
  141. careamics/utils/autocorrelation.py +40 -0
  142. careamics/utils/base_enum.py +60 -0
  143. careamics/utils/context.py +66 -0
  144. careamics/utils/logging.py +322 -0
  145. careamics/utils/metrics.py +188 -0
  146. careamics/utils/path_utils.py +26 -0
  147. careamics/utils/ram.py +15 -0
  148. careamics/utils/receptive_field.py +108 -0
  149. careamics/utils/torch_utils.py +127 -0
  150. careamics-0.0.3.dist-info/METADATA +78 -0
  151. careamics-0.0.3.dist-info/RECORD +154 -0
  152. {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/WHEEL +1 -1
  153. {careamics-0.0.1.dist-info → careamics-0.0.3.dist-info}/licenses/LICENSE +1 -1
  154. careamics-0.0.1.dist-info/METADATA +0 -46
  155. careamics-0.0.1.dist-info/RECORD +0 -6
careamics/models/lvae/utils.py
@@ -0,0 +1,395 @@
+ """
+ Script for utility functions needed by the LVAE model.
+ """
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torchvision.transforms.functional as F
+ from torch.distributions.normal import Normal
+
+
+ def torch_nanmean(inp):
+     """Compute the mean of a tensor, ignoring NaN entries."""
+     return torch.mean(inp[~inp.isnan()])
+
+
+ def compute_batch_mean(x):
+     """Average over all dimensions except the batch dimension."""
+     N = len(x)
+     return x.view(N, -1).mean(dim=1)
+
+
+ def power_of_2(x):
+     """Check whether an integer is a power of 2."""
+     assert isinstance(x, int)
+     if x == 1:
+         return True
+     if x == 0:
+         # happens with validation
+         return False
+     if x % 2 == 1:
+         return False
+     return power_of_2(x // 2)
+
+
+ class Enum:
+     """Minimal enum-like base class backed by the subclass's attribute dict."""
+
+     @classmethod
+     def name(cls, enum_type):
+         for key, value in cls.__dict__.items():
+             if enum_type == value:
+                 return key
+
+     @classmethod
+     def contains(cls, enum_type):
+         for key, value in cls.__dict__.items():
+             if enum_type == value:
+                 return True
+         return False
+
+     @classmethod
+     def from_name(cls, enum_type_str):
+         for key, value in cls.__dict__.items():
+             if key == enum_type_str:
+                 return value
+         raise AssertionError(f"{cls.__name__}:{enum_type_str} does not exist.")
+
+
+ class LossType(Enum):
+     Elbo = 0
+     ElboWithCritic = 1
+     ElboMixedReconstruction = 2
+     MSE = 3
+     ElboWithNbrConsistency = 4
+     ElboSemiSupMixedReconstruction = 5
+     ElboCL = 6
+     ElboRestrictedReconstruction = 7
+     DenoiSplitMuSplit = 8
+
+
+ class ModelType(Enum):
+     LadderVae = 3
+     LadderVaeTwinDecoder = 4
+     LadderVAECritic = 5
+     # Separate vampprior: two optimizers.
+     LadderVaeSepVampprior = 6
+     # One encoder for the mixed input, two for the separate inputs.
+     LadderVaeSepEncoder = 7
+     LadderVAEMultiTarget = 8
+     LadderVaeSepEncoderSingleOptim = 9
+     UNet = 10
+     BraveNet = 11
+     LadderVaeStitch = 12
+     LadderVaeSemiSupervised = 13
+     # Note that previously trained models will have issues, since earlier
+     # LadderVaeStitch2Stage = 13 and LadderVaeSemiSupervised = 14.
+     LadderVaeStitch2Stage = 14
+     LadderVaeMixedRecons = 15
+     LadderVaeCL = 16
+     # On one subset, apply disentanglement; on the other, apply reconstruction.
+     LadderVaeTwoDataSet = 17
+     LadderVaeTwoDatasetMultiBranch = 18
+     LadderVaeTwoDatasetMultiOptim = 19
+     LVaeDeepEncoderIntensityAug = 20
+     AutoRegresiveLadderVAE = 21
+     LadderVAEInterleavedOptimization = 22
+     Denoiser = 23
+     DenoiserSplitter = 24
+     SplitterDenoiser = 25
+     LadderVAERestrictedReconstruction = 26
+     LadderVAETwoDataSetRestRecon = 27
+     LadderVAETwoDataSetFinetuning = 28
+
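
As a quick aside (not part of the diff), the hand-rolled Enum base gives these classes a dict-backed lookup API; for example:

    LossType.contains(LossType.Elbo)           # True
    LossType.name(LossType.DenoiSplitMuSplit)  # "DenoiSplitMuSplit"
    LossType.from_name("MSE")                  # 3
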
+ def _pad_crop_img(x, size, mode) -> torch.Tensor:
+     """Pad or crop a tensor of shape (batch, channels, h, w) to a new
+     height and width given by a tuple.
+
+     Parameters
+     ----------
+     x : torch.Tensor
+         Input image.
+     size : list or tuple
+         Desired size (height, width).
+     mode : str
+         Mode, either 'pad' or 'crop'.
+
+     Returns
+     -------
+     torch.Tensor
+         The padded or cropped tensor.
+     """
+     assert x.dim() == 4 and len(size) == 2
+     size = tuple(size)
+     x_size = x.size()[2:4]
+     if mode == "pad":
+         cond = x_size[0] > size[0] or x_size[1] > size[1]
+     elif mode == "crop":
+         cond = x_size[0] < size[0] or x_size[1] < size[1]
+     else:
+         raise ValueError(f"invalid mode '{mode}'")
+     if cond:
+         raise ValueError(f"trying to {mode} from size {x_size} to size {size}")
+     dr, dc = (abs(x_size[0] - size[0]), abs(x_size[1] - size[1]))
+     dr1, dr2 = dr // 2, dr - (dr // 2)
+     dc1, dc2 = dc // 2, dc - (dc // 2)
+     if mode == "pad":
+         return nn.functional.pad(x, [dc1, dc2, dr1, dr2, 0, 0, 0, 0])
+     elif mode == "crop":
+         return x[:, :, dr1 : x_size[0] - dr2, dc1 : x_size[1] - dc2]
+
+
+ def pad_img_tensor(x, size) -> torch.Tensor:
+     """Pad a tensor of shape (batch, channels, h, w) to a desired height and width.
+
+     Parameters
+     ----------
+     x : torch.Tensor
+         Input image.
+     size : list or tuple
+         Desired size (height, width).
+
+     Returns
+     -------
+     torch.Tensor
+         The padded tensor.
+     """
+     return _pad_crop_img(x, size, "pad")
+
+
+ def crop_img_tensor(x, size) -> torch.Tensor:
+     """Crop a tensor of shape (batch, channels, h, w) to a desired height
+     and width given by a tuple.
+
+     Parameters
+     ----------
+     x : torch.Tensor
+         Input image.
+     size : list or tuple
+         Desired size (height, width).
+
+     Returns
+     -------
+     torch.Tensor
+         The cropped tensor.
+     """
+     return _pad_crop_img(x, size, "crop")
+
+
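
For intuition, a minimal round-trip sketch (shapes are illustrative, not from the package):

    import torch
    x = torch.zeros(1, 1, 28, 28)
    padded = pad_img_tensor(x, (32, 32))          # -> (1, 1, 32, 32), zero-padded evenly
    restored = crop_img_tensor(padded, (28, 28))  # center-crop back to the original size
    assert restored.shape == x.shape
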
+ class StableExponential:
+     """
+     Class that redefines exp() to increase numerical stability.
+     Naturally, the definition of log() must change accordingly.
+     However, the two operations remain inverses of each other,
+     meaning that x = log(exp(x)) and x = exp(log(x)) always hold.
+
+     Definition:
+         exp(x) = {
+             exp(x) if x <= 0
+             x + 1  if x > 0
+         }
+
+         log(x) = {
+             x          if x <= 0
+             log(1 + x) if x > 0
+         }
+
+     NOTE 1:
+         Within the class, everything is computed on the tensor given to the
+         constructor. Calling exp() computes the stable exponential of
+         self._tensor; calling log() is equivalent to computing torch.log()
+         of that stable exponential.
+
+     NOTE 2:
+         Given the output of exp(), torch.log() and the log() method of this
+         class give identical results.
+     """
+
+     def __init__(self, tensor):
+         self._raw_tensor = tensor
+         posneg_dic = self.posneg_separation(self._raw_tensor)
+         self.pos_f, self.neg_f = posneg_dic["filter"]
+         self.pos_data, self.neg_data = posneg_dic["value"]
+
+     def posneg_separation(self, tensor):
+         # Boolean masks and clipped copies of the positive and negative parts.
+         pos = tensor > 0
+         pos_tensor = torch.clip(tensor, min=0)
+
+         neg = tensor <= 0
+         neg_tensor = torch.clip(tensor, max=0)
+
+         return {"filter": [pos, neg], "value": [pos_tensor, neg_tensor]}
+
+     def exp(self):
+         return torch.exp(self.neg_data) * self.neg_f + (1 + self.pos_data) * self.pos_f
+
+     def log(self):
+         return self.neg_data * self.neg_f + torch.log(1 + self.pos_data) * self.pos_f
+
+
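
A small numeric check (illustrative) of the inverse property described in the docstring:

    import torch
    x = torch.tensor([-3.0, 0.0, 3.0])
    stable = StableExponential(x)
    y = stable.exp()                                   # tensor([exp(-3), 1.0, 4.0])
    assert torch.allclose(torch.log(y), stable.log())  # NOTE 2 in the docstring
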
+ class StableLogVar:
+     """
+     Class that provides a numerically stable implementation of the
+     log-variance. Specifically, it uses the exp() and log() formulas defined
+     in the `StableExponential` class.
+     """
+
+     def __init__(
+         self, logvar: torch.Tensor, enable_stable: bool = True, var_eps: float = 1e-6
+     ):
+         """
+         Constructor.
+
+         Parameters
+         ----------
+         logvar : torch.Tensor
+             The input (true) logvar vector, to be converted to the stable version.
+         enable_stable : bool, optional
+             Whether to compute the stable version of the log-variance. Default is `True`.
+         var_eps : float, optional
+             The minimum value attainable by the variance. Default is `1e-6`.
+         """
+         self._lv = logvar
+         self._enable_stable = enable_stable
+         self._eps = var_eps
+
+     def get(self) -> torch.Tensor:
+         """Get the (stable) log-variance."""
+         if self._enable_stable is False:
+             return self._lv
+
+         return torch.log(self.get_var())
+
+     def get_var(self) -> torch.Tensor:
+         """Get the variance from the log-variance."""
+         if self._enable_stable is False:
+             return torch.exp(self._lv)
+         return StableExponential(self._lv).exp() + self._eps
+
+     def get_std(self) -> torch.Tensor:
+         """Get the standard deviation from the log-variance."""
+         return torch.sqrt(self.get_var())
+
+     def centercrop_to_size(self, size: int) -> None:
+         """
+         Center-crop the log-variance tensor to the desired size.
+
+         Parameters
+         ----------
+         size : int
+             The desired spatial size of the log-variance tensor.
+         """
+         if self._lv.shape[-1] == size:
+             return
+
+         diff = self._lv.shape[-1] - size
+         assert diff > 0 and diff % 2 == 0
+         self._lv = F.center_crop(self._lv, (size, size))
+
+
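
The accessors relate as one would expect; a brief sketch (values illustrative):

    import torch
    lv = StableLogVar(torch.tensor([-1.0, 0.0, 2.0]))
    var = lv.get_var()                    # stable exp of the logvar, plus var_eps
    assert torch.allclose(lv.get_std() ** 2, var)
    assert torch.allclose(lv.get(), torch.log(var))
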
+ class StableMean:
+     """Thin wrapper around the mean tensor, the counterpart of `StableLogVar`."""
+
+     def __init__(self, mean):
+         self._mean = mean
+
+     def get(self) -> torch.Tensor:
+         return self._mean
+
+     def centercrop_to_size(self, size: int) -> None:
+         """
+         Center-crop the mean tensor to the desired size.
+
+         Parameters
+         ----------
+         size : int
+             The desired spatial size of the mean tensor.
+         """
+         if self._mean.shape[-1] == size:
+             return
+
+         diff = self._mean.shape[-1] - size
+         assert diff > 0 and diff % 2 == 0
+         self._mean = F.center_crop(self._mean, (size, size))
+
+
+ def allow_numpy(func):
+     """
+     Decorator that converts positional numpy-array arguments to torch tensors.
+     Keyword arguments are passed through as-is; positional arguments are
+     checked and, if they are numpy arrays, converted to torch tensors.
+     """
+
+     def numpy_wrapper(*args, **kwargs):
+         new_args = []
+         for arg in args:
+             if isinstance(arg, np.ndarray):
+                 arg = torch.Tensor(arg)
+             new_args.append(arg)
+         new_args = tuple(new_args)
+
+         output = func(*new_args, **kwargs)
+         return output
+
+     return numpy_wrapper
+
+
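
A hypothetical usage sketch (the decorated function is illustrative, not from the package):

    import numpy as np
    import torch

    @allow_numpy
    def frobenius_norm(t):
        return torch.linalg.norm(t)

    frobenius_norm(np.ones((4, 4)))  # the array is converted to a tensor first
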
+ class Interpolate(nn.Module):
+     """Wrapper for torch.nn.functional.interpolate."""
+
+     def __init__(self, size=None, scale=None, mode="bilinear", align_corners=False):
+         super().__init__()
+         # Exactly one of `size` and `scale` must be given.
+         assert (size is None) == (scale is not None)
+         self.size = size
+         self.scale = scale
+         self.mode = mode
+         self.align_corners = align_corners
+
+     def forward(self, x):
+         # Use torch.nn.functional.interpolate, as the docstring states
+         # (`F` is torchvision.transforms.functional, which has no interpolate).
+         out = nn.functional.interpolate(
+             x,
+             size=self.size,
+             scale_factor=self.scale,
+             mode=self.mode,
+             align_corners=self.align_corners,
+         )
+         return out
+
+
+ def kl_normal_mc(z, p_mulv, q_mulv):
+     """
+     One-sample estimation of the element-wise KL divergence between two
+     diagonal multivariate normal distributions. Any number of dimensions;
+     broadcasting is supported (be careful).
+
+     Parameters
+     ----------
+     z : torch.Tensor
+         Sample at which the log-densities are evaluated.
+     p_mulv : tuple
+         (mean, log-variance) of p, as (`StableMean`, `StableLogVar`).
+     q_mulv : tuple
+         (mean, log-variance) of q, as (`StableMean`, `StableLogVar`).
+
+     Returns
+     -------
+     torch.Tensor
+         The element-wise KL estimate log q(z) - log p(z).
+     """
+     assert isinstance(p_mulv, tuple)
+     assert isinstance(q_mulv, tuple)
+     p_mu, p_lv = p_mulv
+     q_mu, q_lv = q_mulv
+
+     p_std = p_lv.get_std()
+     q_std = q_lv.get_std()
+
+     p_distrib = Normal(p_mu.get(), p_std)
+     q_distrib = Normal(q_mu.get(), q_std)
+     return q_distrib.log_prob(z) - p_distrib.log_prob(z)
+
+
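
A minimal sketch of the calling convention (identical p and q, so the estimate is exactly zero):

    import torch
    mu = StableMean(torch.zeros(4))
    lv = StableLogVar(torch.zeros(4))
    z = Normal(mu.get(), lv.get_std()).rsample()
    kl = kl_normal_mc(z, (mu, lv), (mu, lv))  # tensor([0., 0., 0., 0.])
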
+ def free_bits_kl(
+     kl: torch.Tensor, free_bits: float, batch_average: bool = False, eps: float = 1e-6
+ ) -> torch.Tensor:
+     """
+     Compute the free-bits version of the KL divergence.
+     It ensures that the KL does not collapse to zero for any latent dimension,
+     hence it helps to use the latent variables more efficiently,
+     leading to better representation learning.
+
+     NOTE:
+         Takes in the KL with shape (batch size, layers), returns the KL with
+         free bits (for optimization) with shape (layers,), which is the average
+         free-bits KL per layer in the current batch.
+         If batch_average is False (default), the free bits are per layer and
+         per batch element. Otherwise, the free bits are still per layer, but
+         are assigned on average to the whole batch. In both cases, the batch
+         average is returned, so it is simply a matter of computing
+         mean(clamp(KL)) or clamp(mean(KL)).
+
+     Parameters
+     ----------
+     kl : torch.Tensor
+         KL divergence of shape (batch size, layers).
+     free_bits : float
+         Free-bits threshold below which the KL is clamped.
+     batch_average : bool, optional
+         Whether to clamp the batch-averaged KL instead of the per-element KL.
+     eps : float, optional
+         Threshold below which `free_bits` is treated as zero.
+
+     Returns
+     -------
+     torch.Tensor
+         The KL with free bits, of shape (layers,).
+     """
+     assert kl.dim() == 2
+     if free_bits < eps:
+         return kl.mean(0)
+     if batch_average:
+         return kl.mean(0).clamp(min=free_bits)
+     return kl.clamp(min=free_bits).mean(0)
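
A numeric sketch of the two clamping orders (values illustrative):

    import torch
    kl = torch.tensor([[0.1, 2.0], [0.9, 4.0]])  # (batch=2, layers=2)
    free_bits_kl(kl, 0.5)                        # clamp then mean: tensor([0.7000, 3.0000])
    free_bits_kl(kl, 0.5, batch_average=True)    # mean then clamp: tensor([0.5000, 3.0000])
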
careamics/models/model_factory.py
@@ -0,0 +1,67 @@
+ """
+ Model factory.
+
+ Model creation factory functions.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Union
+
+ import torch
+
+ from careamics.config.architectures import (
+     CustomModel,
+     get_custom_model,
+ )
+ from careamics.config.support import SupportedArchitecture
+ from careamics.models.lvae import LadderVAE as LVAE
+ from careamics.models.unet import UNet
+ from careamics.utils import get_logger
+
+ if TYPE_CHECKING:
+     from careamics.config.architectures import (
+         LVAEModel,
+         UNetModel,
+     )
+
+
+ logger = get_logger(__name__)
+
+
+ def model_factory(
+     model_configuration: Union[UNetModel, LVAEModel, CustomModel],
+ ) -> torch.nn.Module:
+     """
+     Deep learning model factory.
+
+     Supported models are defined in careamics.config.SupportedArchitecture.
+
+     Parameters
+     ----------
+     model_configuration : Union[UNetModel, LVAEModel, CustomModel]
+         Model configuration.
+
+     Returns
+     -------
+     torch.nn.Module
+         Model instance.
+
+     Raises
+     ------
+     NotImplementedError
+         If the requested architecture is not implemented.
+     """
+     if model_configuration.architecture == SupportedArchitecture.UNET:
+         return UNet(**model_configuration.model_dump())
+     elif model_configuration.architecture == SupportedArchitecture.LVAE:
+         return LVAE(**model_configuration.model_dump())
+     elif model_configuration.architecture == SupportedArchitecture.CUSTOM:
+         assert isinstance(model_configuration, CustomModel)
+         model = get_custom_model(model_configuration.name)
+         return model(**model_configuration.model_dump())
+     else:
+         raise NotImplementedError(
+             f"Model {model_configuration.architecture} is not implemented or unknown."
+         )
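
A hedged usage sketch; the exact UNetModel fields are assumptions, since the configuration models appear elsewhere in this release:

    from careamics.config.architectures import UNetModel
    from careamics.models.model_factory import model_factory

    # Assumed: UNetModel validates with defaults once the architecture is set.
    config = UNetModel(architecture="UNet")
    model = model_factory(config)  # dispatches on config.architecture to UNet
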