careamics 0.0.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (279)
  1. careamics/__init__.py +24 -0
  2. careamics/careamist.py +961 -0
  3. careamics/cli/__init__.py +5 -0
  4. careamics/cli/conf.py +394 -0
  5. careamics/cli/main.py +234 -0
  6. careamics/cli/utils.py +27 -0
  7. careamics/config/__init__.py +66 -0
  8. careamics/config/algorithms/__init__.py +21 -0
  9. careamics/config/algorithms/care_algorithm_config.py +122 -0
  10. careamics/config/algorithms/hdn_algorithm_config.py +103 -0
  11. careamics/config/algorithms/microsplit_algorithm_config.py +103 -0
  12. careamics/config/algorithms/n2n_algorithm_config.py +115 -0
  13. careamics/config/algorithms/n2v_algorithm_config.py +296 -0
  14. careamics/config/algorithms/pn2v_algorithm_config.py +301 -0
  15. careamics/config/algorithms/unet_algorithm_config.py +91 -0
  16. careamics/config/algorithms/vae_algorithm_config.py +178 -0
  17. careamics/config/architectures/__init__.py +7 -0
  18. careamics/config/architectures/architecture_config.py +37 -0
  19. careamics/config/architectures/lvae_config.py +262 -0
  20. careamics/config/architectures/unet_config.py +125 -0
  21. careamics/config/configuration.py +367 -0
  22. careamics/config/configuration_factories.py +2400 -0
  23. careamics/config/data/__init__.py +27 -0
  24. careamics/config/data/data_config.py +472 -0
  25. careamics/config/data/inference_config.py +237 -0
  26. careamics/config/data/ng_data_config.py +1038 -0
  27. careamics/config/data/patch_filter/__init__.py +15 -0
  28. careamics/config/data/patch_filter/filter_config.py +16 -0
  29. careamics/config/data/patch_filter/mask_filter_config.py +17 -0
  30. careamics/config/data/patch_filter/max_filter_config.py +15 -0
  31. careamics/config/data/patch_filter/meanstd_filter_config.py +18 -0
  32. careamics/config/data/patch_filter/shannon_filter_config.py +15 -0
  33. careamics/config/data/patching_strategies/__init__.py +15 -0
  34. careamics/config/data/patching_strategies/_overlapping_patched_config.py +102 -0
  35. careamics/config/data/patching_strategies/_patched_config.py +56 -0
  36. careamics/config/data/patching_strategies/random_patching_config.py +45 -0
  37. careamics/config/data/patching_strategies/sequential_patching_config.py +25 -0
  38. careamics/config/data/patching_strategies/tiled_patching_config.py +40 -0
  39. careamics/config/data/patching_strategies/whole_patching_config.py +12 -0
  40. careamics/config/data/tile_information.py +65 -0
  41. careamics/config/lightning/__init__.py +15 -0
  42. careamics/config/lightning/callbacks/__init__.py +8 -0
  43. careamics/config/lightning/callbacks/callback_config.py +116 -0
  44. careamics/config/lightning/optimizer_configs.py +186 -0
  45. careamics/config/lightning/training_config.py +70 -0
  46. careamics/config/losses/__init__.py +8 -0
  47. careamics/config/losses/loss_config.py +60 -0
  48. careamics/config/ng_configs/__init__.py +5 -0
  49. careamics/config/ng_configs/n2v_configuration.py +64 -0
  50. careamics/config/ng_configs/ng_configuration.py +256 -0
  51. careamics/config/ng_factories/__init__.py +9 -0
  52. careamics/config/ng_factories/algorithm_factory.py +120 -0
  53. careamics/config/ng_factories/data_factory.py +154 -0
  54. careamics/config/ng_factories/n2v_factory.py +256 -0
  55. careamics/config/ng_factories/training_factory.py +69 -0
  56. careamics/config/noise_model/__init__.py +12 -0
  57. careamics/config/noise_model/likelihood_config.py +60 -0
  58. careamics/config/noise_model/noise_model_config.py +149 -0
  59. careamics/config/support/__init__.py +31 -0
  60. careamics/config/support/supported_activations.py +27 -0
  61. careamics/config/support/supported_algorithms.py +40 -0
  62. careamics/config/support/supported_architectures.py +13 -0
  63. careamics/config/support/supported_data.py +122 -0
  64. careamics/config/support/supported_filters.py +17 -0
  65. careamics/config/support/supported_loggers.py +10 -0
  66. careamics/config/support/supported_losses.py +32 -0
  67. careamics/config/support/supported_optimizers.py +57 -0
  68. careamics/config/support/supported_patching_strategies.py +22 -0
  69. careamics/config/support/supported_pixel_manipulations.py +15 -0
  70. careamics/config/support/supported_struct_axis.py +21 -0
  71. careamics/config/support/supported_transforms.py +12 -0
  72. careamics/config/transformations/__init__.py +22 -0
  73. careamics/config/transformations/n2v_manipulate_config.py +79 -0
  74. careamics/config/transformations/normalize_config.py +59 -0
  75. careamics/config/transformations/transform_config.py +45 -0
  76. careamics/config/transformations/transform_unions.py +29 -0
  77. careamics/config/transformations/xy_flip_config.py +43 -0
  78. careamics/config/transformations/xy_random_rotate90_config.py +35 -0
  79. careamics/config/utils/__init__.py +8 -0
  80. careamics/config/utils/configuration_io.py +85 -0
  81. careamics/config/validators/__init__.py +18 -0
  82. careamics/config/validators/axes_validators.py +90 -0
  83. careamics/config/validators/model_validators.py +84 -0
  84. careamics/config/validators/patch_validators.py +55 -0
  85. careamics/conftest.py +39 -0
  86. careamics/dataset/__init__.py +17 -0
  87. careamics/dataset/dataset_utils/__init__.py +19 -0
  88. careamics/dataset/dataset_utils/dataset_utils.py +118 -0
  89. careamics/dataset/dataset_utils/file_utils.py +141 -0
  90. careamics/dataset/dataset_utils/iterate_over_files.py +84 -0
  91. careamics/dataset/dataset_utils/running_stats.py +189 -0
  92. careamics/dataset/in_memory_dataset.py +303 -0
  93. careamics/dataset/in_memory_pred_dataset.py +88 -0
  94. careamics/dataset/in_memory_tiled_pred_dataset.py +131 -0
  95. careamics/dataset/iterable_dataset.py +294 -0
  96. careamics/dataset/iterable_pred_dataset.py +121 -0
  97. careamics/dataset/iterable_tiled_pred_dataset.py +141 -0
  98. careamics/dataset/patching/__init__.py +1 -0
  99. careamics/dataset/patching/patching.py +300 -0
  100. careamics/dataset/patching/random_patching.py +110 -0
  101. careamics/dataset/patching/sequential_patching.py +212 -0
  102. careamics/dataset/patching/validate_patch_dimension.py +64 -0
  103. careamics/dataset/tiling/__init__.py +10 -0
  104. careamics/dataset/tiling/collate_tiles.py +33 -0
  105. careamics/dataset/tiling/lvae_tiled_patching.py +375 -0
  106. careamics/dataset/tiling/tiled_patching.py +166 -0
  107. careamics/dataset_ng/README.md +212 -0
  108. careamics/dataset_ng/__init__.py +0 -0
  109. careamics/dataset_ng/dataset.py +365 -0
  110. careamics/dataset_ng/demos/bsd68_demo.ipynb +361 -0
  111. careamics/dataset_ng/demos/bsd68_zarr_demo.ipynb +453 -0
  112. careamics/dataset_ng/demos/care_U2OS_demo.ipynb +330 -0
  113. careamics/dataset_ng/demos/demo_custom_image_stack.ipynb +736 -0
  114. careamics/dataset_ng/demos/demo_datamodule.ipynb +447 -0
  115. careamics/dataset_ng/demos/demo_dataset.ipynb +278 -0
  116. careamics/dataset_ng/demos/demo_patch_extractor.py +51 -0
  117. careamics/dataset_ng/demos/mouse_nuclei_demo.ipynb +293 -0
  118. careamics/dataset_ng/factory.py +180 -0
  119. careamics/dataset_ng/grouped_index_sampler.py +73 -0
  120. careamics/dataset_ng/image_stack/__init__.py +14 -0
  121. careamics/dataset_ng/image_stack/czi_image_stack.py +396 -0
  122. careamics/dataset_ng/image_stack/file_image_stack.py +140 -0
  123. careamics/dataset_ng/image_stack/image_stack_protocol.py +93 -0
  124. careamics/dataset_ng/image_stack/image_utils/__init__.py +6 -0
  125. careamics/dataset_ng/image_stack/image_utils/image_stack_utils.py +125 -0
  126. careamics/dataset_ng/image_stack/in_memory_image_stack.py +93 -0
  127. careamics/dataset_ng/image_stack/zarr_image_stack.py +170 -0
  128. careamics/dataset_ng/image_stack_loader/__init__.py +19 -0
  129. careamics/dataset_ng/image_stack_loader/image_stack_loader_protocol.py +70 -0
  130. careamics/dataset_ng/image_stack_loader/image_stack_loaders.py +273 -0
  131. careamics/dataset_ng/image_stack_loader/zarr_utils.py +130 -0
  132. careamics/dataset_ng/legacy_interoperability.py +175 -0
  133. careamics/dataset_ng/microsplit_input_synth.py +377 -0
  134. careamics/dataset_ng/patch_extractor/__init__.py +7 -0
  135. careamics/dataset_ng/patch_extractor/limit_file_extractor.py +50 -0
  136. careamics/dataset_ng/patch_extractor/patch_construction.py +151 -0
  137. careamics/dataset_ng/patch_extractor/patch_extractor.py +117 -0
  138. careamics/dataset_ng/patch_filter/__init__.py +20 -0
  139. careamics/dataset_ng/patch_filter/coordinate_filter_protocol.py +27 -0
  140. careamics/dataset_ng/patch_filter/filter_factory.py +95 -0
  141. careamics/dataset_ng/patch_filter/mask_filter.py +96 -0
  142. careamics/dataset_ng/patch_filter/max_filter.py +188 -0
  143. careamics/dataset_ng/patch_filter/mean_std_filter.py +218 -0
  144. careamics/dataset_ng/patch_filter/patch_filter_protocol.py +50 -0
  145. careamics/dataset_ng/patch_filter/shannon_filter.py +188 -0
  146. careamics/dataset_ng/patching_strategies/__init__.py +26 -0
  147. careamics/dataset_ng/patching_strategies/patching_strategy_factory.py +50 -0
  148. careamics/dataset_ng/patching_strategies/patching_strategy_protocol.py +161 -0
  149. careamics/dataset_ng/patching_strategies/random_patching.py +393 -0
  150. careamics/dataset_ng/patching_strategies/sequential_patching.py +99 -0
  151. careamics/dataset_ng/patching_strategies/tiling_strategy.py +207 -0
  152. careamics/dataset_ng/patching_strategies/whole_sample.py +61 -0
  153. careamics/file_io/__init__.py +15 -0
  154. careamics/file_io/read/__init__.py +11 -0
  155. careamics/file_io/read/get_func.py +57 -0
  156. careamics/file_io/read/tiff.py +58 -0
  157. careamics/file_io/write/__init__.py +15 -0
  158. careamics/file_io/write/get_func.py +63 -0
  159. careamics/file_io/write/tiff.py +40 -0
  160. careamics/lightning/__init__.py +32 -0
  161. careamics/lightning/callbacks/__init__.py +13 -0
  162. careamics/lightning/callbacks/data_stats_callback.py +33 -0
  163. careamics/lightning/callbacks/hyperparameters_callback.py +49 -0
  164. careamics/lightning/callbacks/prediction_writer_callback/__init__.py +20 -0
  165. careamics/lightning/callbacks/prediction_writer_callback/file_path_utils.py +56 -0
  166. careamics/lightning/callbacks/prediction_writer_callback/prediction_writer_callback.py +234 -0
  167. careamics/lightning/callbacks/prediction_writer_callback/write_strategy.py +399 -0
  168. careamics/lightning/callbacks/prediction_writer_callback/write_strategy_factory.py +215 -0
  169. careamics/lightning/callbacks/progress_bar_callback.py +90 -0
  170. careamics/lightning/dataset_ng/__init__.py +1 -0
  171. careamics/lightning/dataset_ng/callbacks/__init__.py +1 -0
  172. careamics/lightning/dataset_ng/callbacks/prediction_writer/__init__.py +29 -0
  173. careamics/lightning/dataset_ng/callbacks/prediction_writer/cached_tiles_strategy.py +164 -0
  174. careamics/lightning/dataset_ng/callbacks/prediction_writer/file_path_utils.py +33 -0
  175. careamics/lightning/dataset_ng/callbacks/prediction_writer/prediction_writer_callback.py +219 -0
  176. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_image_strategy.py +91 -0
  177. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_strategy.py +27 -0
  178. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_strategy_factory.py +214 -0
  179. careamics/lightning/dataset_ng/callbacks/prediction_writer/write_tiles_zarr_strategy.py +375 -0
  180. careamics/lightning/dataset_ng/data_module.py +529 -0
  181. careamics/lightning/dataset_ng/data_module_utils.py +395 -0
  182. careamics/lightning/dataset_ng/lightning_modules/__init__.py +9 -0
  183. careamics/lightning/dataset_ng/lightning_modules/care_module.py +97 -0
  184. careamics/lightning/dataset_ng/lightning_modules/n2v_module.py +106 -0
  185. careamics/lightning/dataset_ng/lightning_modules/unet_module.py +221 -0
  186. careamics/lightning/dataset_ng/prediction/__init__.py +16 -0
  187. careamics/lightning/dataset_ng/prediction/convert_prediction.py +198 -0
  188. careamics/lightning/dataset_ng/prediction/stitch_prediction.py +171 -0
  189. careamics/lightning/lightning_module.py +914 -0
  190. careamics/lightning/microsplit_data_module.py +632 -0
  191. careamics/lightning/predict_data_module.py +341 -0
  192. careamics/lightning/train_data_module.py +666 -0
  193. careamics/losses/__init__.py +21 -0
  194. careamics/losses/fcn/__init__.py +1 -0
  195. careamics/losses/fcn/losses.py +125 -0
  196. careamics/losses/loss_factory.py +80 -0
  197. careamics/losses/lvae/__init__.py +1 -0
  198. careamics/losses/lvae/loss_utils.py +83 -0
  199. careamics/losses/lvae/losses.py +589 -0
  200. careamics/lvae_training/__init__.py +0 -0
  201. careamics/lvae_training/calibration.py +191 -0
  202. careamics/lvae_training/dataset/__init__.py +20 -0
  203. careamics/lvae_training/dataset/config.py +135 -0
  204. careamics/lvae_training/dataset/lc_dataset.py +274 -0
  205. careamics/lvae_training/dataset/ms_dataset_ref.py +1067 -0
  206. careamics/lvae_training/dataset/multich_dataset.py +1121 -0
  207. careamics/lvae_training/dataset/multicrop_dset.py +196 -0
  208. careamics/lvae_training/dataset/multifile_dataset.py +335 -0
  209. careamics/lvae_training/dataset/types.py +32 -0
  210. careamics/lvae_training/dataset/utils/__init__.py +0 -0
  211. careamics/lvae_training/dataset/utils/data_utils.py +114 -0
  212. careamics/lvae_training/dataset/utils/empty_patch_fetcher.py +65 -0
  213. careamics/lvae_training/dataset/utils/index_manager.py +491 -0
  214. careamics/lvae_training/dataset/utils/index_switcher.py +165 -0
  215. careamics/lvae_training/eval_utils.py +987 -0
  216. careamics/lvae_training/get_config.py +84 -0
  217. careamics/lvae_training/lightning_module.py +701 -0
  218. careamics/lvae_training/metrics.py +214 -0
  219. careamics/lvae_training/train_lvae.py +342 -0
  220. careamics/lvae_training/train_utils.py +121 -0
  221. careamics/model_io/__init__.py +7 -0
  222. careamics/model_io/bioimage/__init__.py +11 -0
  223. careamics/model_io/bioimage/_readme_factory.py +113 -0
  224. careamics/model_io/bioimage/bioimage_utils.py +56 -0
  225. careamics/model_io/bioimage/cover_factory.py +171 -0
  226. careamics/model_io/bioimage/model_description.py +341 -0
  227. careamics/model_io/bmz_io.py +251 -0
  228. careamics/model_io/model_io_utils.py +95 -0
  229. careamics/models/__init__.py +5 -0
  230. careamics/models/activation.py +40 -0
  231. careamics/models/layers.py +495 -0
  232. careamics/models/lvae/__init__.py +3 -0
  233. careamics/models/lvae/layers.py +1371 -0
  234. careamics/models/lvae/likelihoods.py +394 -0
  235. careamics/models/lvae/lvae.py +848 -0
  236. careamics/models/lvae/noise_models.py +738 -0
  237. careamics/models/lvae/stochastic.py +394 -0
  238. careamics/models/lvae/utils.py +404 -0
  239. careamics/models/model_factory.py +54 -0
  240. careamics/models/unet.py +449 -0
  241. careamics/nm_training_placeholder.py +203 -0
  242. careamics/prediction_utils/__init__.py +21 -0
  243. careamics/prediction_utils/lvae_prediction.py +158 -0
  244. careamics/prediction_utils/lvae_tiling_manager.py +362 -0
  245. careamics/prediction_utils/prediction_outputs.py +238 -0
  246. careamics/prediction_utils/stitch_prediction.py +193 -0
  247. careamics/py.typed +5 -0
  248. careamics/transforms/__init__.py +22 -0
  249. careamics/transforms/compose.py +173 -0
  250. careamics/transforms/n2v_manipulate.py +150 -0
  251. careamics/transforms/n2v_manipulate_torch.py +149 -0
  252. careamics/transforms/normalize.py +374 -0
  253. careamics/transforms/pixel_manipulation.py +406 -0
  254. careamics/transforms/pixel_manipulation_torch.py +388 -0
  255. careamics/transforms/struct_mask_parameters.py +20 -0
  256. careamics/transforms/transform.py +24 -0
  257. careamics/transforms/tta.py +88 -0
  258. careamics/transforms/xy_flip.py +131 -0
  259. careamics/transforms/xy_random_rotate90.py +108 -0
  260. careamics/utils/__init__.py +19 -0
  261. careamics/utils/autocorrelation.py +40 -0
  262. careamics/utils/base_enum.py +60 -0
  263. careamics/utils/context.py +67 -0
  264. careamics/utils/deprecation.py +63 -0
  265. careamics/utils/lightning_utils.py +71 -0
  266. careamics/utils/logging.py +323 -0
  267. careamics/utils/metrics.py +394 -0
  268. careamics/utils/path_utils.py +26 -0
  269. careamics/utils/plotting.py +76 -0
  270. careamics/utils/ram.py +15 -0
  271. careamics/utils/receptive_field.py +108 -0
  272. careamics/utils/serializers.py +62 -0
  273. careamics/utils/torch_utils.py +150 -0
  274. careamics/utils/version.py +38 -0
  275. careamics-0.0.19.dist-info/METADATA +80 -0
  276. careamics-0.0.19.dist-info/RECORD +279 -0
  277. careamics-0.0.19.dist-info/WHEEL +4 -0
  278. careamics-0.0.19.dist-info/entry_points.txt +2 -0
  279. careamics-0.0.19.dist-info/licenses/LICENSE +28 -0
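The hunk below appears to correspond to item 237, careamics/models/lvae/stochastic.py, shown in full as a new file.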
@@ -0,0 +1,394 @@
+ """Script containing the common basic blocks (nn.Module)
+ reused by the LadderVAE architecture.
+ """
+
+ from typing import Dict, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torchvision.transforms.functional as F
+ from torch.distributions import kl_divergence
+ from torch.distributions.normal import Normal
+
+ from .utils import (
+     StableLogVar,
+     StableMean,
+     kl_normal_mc,
+ )
+
+ ConvType = Union[nn.Conv2d, nn.Conv3d]
+ NormType = Union[nn.BatchNorm2d, nn.BatchNorm3d]
+ DropoutType = Union[nn.Dropout2d, nn.Dropout3d]
+
+
+ class NormalStochasticBlock(nn.Module):
+     """
+     Stochastic block used in the top-down inference pass.
+
+     Algorithm:
+         - map the input parameters to q(z) and (optionally) p(z) via convolution
+         - sample a latent tensor z ~ q(z)
+         - feed z to a convolution and return the result.
+
+     NOTE 1:
+         If the parameters for q are not given, sampling is done from p(z).
+
+     NOTE 2:
+         The restricted KL divergence is obtained by first computing the element-wise
+         KL divergence (i.e., the KL computed for each element of the latent tensors).
+         The restricted version is then computed by summing over the channels and the
+         spatial dimensions associated only with the portion of the latent tensor
+         that is used for prediction.
+     """
+
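+     # At hierarchy level i the block parameterizes two Gaussians with
+     # convolutions: the prior p(z_i | z_{i+1}) = N(mu_p, sigma_p) and the
+     # inference distribution q(z_i | .) = N(mu_q, sigma_q); the latent z is
+     # then sampled from one of them (see `get_z`).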
+     def __init__(
+         self,
+         c_in: int,
+         c_vars: int,
+         c_out: int,
+         conv_dims: int = 2,
+         kernel: int = 3,
+         transform_p_params: bool = True,
+         vanilla_latent_hw: Union[int, None] = None,
+         use_naive_exponential: bool = False,
+     ):
+         """
+         Parameters
+         ----------
+         c_in: int
+             The number of channels of the input tensor.
+         c_vars: int
+             The number of channels of the latent space tensor.
+         c_out: int
+             The number of channels of the output of the stochastic layer.
+             Note that this is different from the sampled latent z.
+         conv_dims: int, optional
+             The number of dimensions of the convolutional layers (2D or 3D).
+             Default is 2.
+         kernel: int, optional
+             The size of the kernel used in the convolutional layers.
+             Default is 3.
+         transform_p_params: bool, optional
+             Whether a transformation should be applied to the `p_params` tensor.
+             The transformation consists of a convolution (`conv_in_p()`) that
+             maps the input to a larger number of channels.
+             Default is `True`.
+         vanilla_latent_hw: int, optional
+             The spatial size of the latent tensor used for prediction (i.e., it
+             determines the portion of the tensor over which the restricted KL
+             is computed). Default is `None`.
+         use_naive_exponential: bool, optional
+             If `False`, exponentials are computed according to the alternative
+             definition provided by the `StableExponential` class. This should
+             improve numerical stability during training. Default is `False`.
+         """
+         super().__init__()
+         assert kernel % 2 == 1
+         pad = kernel // 2
+         self.transform_p_params = transform_p_params
+         self.c_in = c_in
+         self.c_out = c_out
+         self.c_vars = c_vars
+         self.conv_dims = conv_dims
+         self._use_naive_exponential = use_naive_exponential
+         self._vanilla_latent_hw = vanilla_latent_hw
+
+         conv_layer: ConvType = getattr(nn, f"Conv{conv_dims}d")
+
+         if transform_p_params:
+             self.conv_in_p = conv_layer(c_in, 2 * c_vars, kernel, padding=pad)
+         self.conv_in_q = conv_layer(c_in, 2 * c_vars, kernel, padding=pad)
+         self.conv_out = conv_layer(c_vars, c_out, kernel, padding=pad)
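+
+         # Channel flow: input (c_in) -> conv_in_p / conv_in_q (2 * c_vars,
+         # i.e., mean and log-variance) -> sampled latent z (c_vars) ->
+         # conv_out (c_out).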
+
+     def get_z(
+         self,
+         sampling_distrib: torch.distributions.normal.Normal,
+         forced_latent: Union[torch.Tensor, None],
+         mode_pred: bool,
+         use_uncond_mode: bool,
+     ) -> torch.Tensor:
+         """Sample a latent tensor from the given latent distribution.
+
+         The latent tensor can be obtained in several ways:
+             - sampled from the (Gaussian) latent distribution;
+             - taken as a pre-defined forced latent;
+             - taken as the mode (mean) of the latent distribution;
+             - in prediction mode (`mode_pred==True`), either sampled or taken
+               as the distribution mode.
+
+         Parameters
+         ----------
+         sampling_distrib: torch.distributions.normal.Normal
+             The Gaussian distribution from which the latent tensor is sampled.
+         forced_latent: torch.Tensor
+             A pre-defined latent tensor. If it is not `None`, then it is used as
+             the actual latent tensor and, hence, sampling does not happen.
+         mode_pred: bool
+             Whether the model is in prediction mode.
+         use_uncond_mode: bool
+             Whether to use the mode of the unconditional distribution p(z) for
+             the latents in prediction mode.
+         """
+         if forced_latent is None:
+             if mode_pred:
+                 if use_uncond_mode:
+                     z = sampling_distrib.mean
+                 else:
+                     z = sampling_distrib.rsample()
+             else:
+                 z = sampling_distrib.rsample()
+         else:
+             z = forced_latent
+         return z
+
+     def sample_from_q(
+         self, q_params: torch.Tensor, var_clip_max: float
+     ) -> torch.Tensor:
+         """
+         Given an input parameter tensor defining q(z), process it with the
+         `process_q_params()` method and sample a latent tensor from the
+         resulting distribution.
+
+         Parameters
+         ----------
+         q_params: torch.Tensor
+             The input tensor to be processed.
+         var_clip_max: float
+             The maximum value reachable by the log-variance of the latent
+             distribution. Values exceeding this threshold are clipped.
+         """
+         _, _, q = self.process_q_params(q_params, var_clip_max)
+         return q.rsample()
+
+     def compute_kl_metrics(
+         self,
+         p: torch.distributions.normal.Normal,
+         p_params: torch.Tensor,
+         q: torch.distributions.normal.Normal,
+         q_params: torch.Tensor,
+         mode_pred: bool,
+         analytical_kl: bool,
+         z: torch.Tensor,
+     ) -> Dict[str, torch.Tensor]:
+         """
+         Compute the KL divergence (analytical or MC estimate) and derive the
+         different aggregated versions of the metric. Specifically, the KL loss
+         terms are:
+             - `kl_elementwise`: KL term for each element of the latent tensor
+               [shape: (batch, ch, h, w)].
+             - `kl_samplewise`: KL term associated with each sample in the batch
+               [shape: (batch, )].
+             - `kl_samplewise_restricted`: KL term associated only with the portion
+               of the latent tensor that is used for prediction, summed over channel
+               and spatial dimensions [shape: (batch, )].
+             - `kl_channelwise`: KL term associated with each sample and each channel
+               [shape: (batch, ch)].
+             - `kl_spatial`: KL term summed over the channels, i.e., retaining the
+               spatial dimensions [shape: (batch, h, w)].
+
+         Parameters
+         ----------
+         p: torch.distributions.normal.Normal
+             The prior generative distribution p(z_i|z_{i+1}) (or p(z_L)).
+         p_params: torch.Tensor
+             The parameters of the prior generative distribution.
+         q: torch.distributions.normal.Normal
+             The inference distribution q(z_i|z_{i+1}) (or q(z_L|x)).
+         q_params: torch.Tensor
+             The parameters of the inference distribution.
+         mode_pred: bool
+             Whether the model is in prediction mode.
+         analytical_kl: bool
+             Whether to compute the KL divergence analytically or via Monte Carlo
+             estimation.
+         z: torch.Tensor
+             The sampled latent tensor.
+         """
+         kl_samplewise_restricted = None
+         if mode_pred is False:  # if not predicting
+             if analytical_kl:
+                 kl_elementwise = kl_divergence(q, p)
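+                 # For two Gaussians this is, element-wise,
+                 # KL(q || p) = log(sigma_p / sigma_q)
+                 #     + (sigma_q**2 + (mu_q - mu_p)**2) / (2 * sigma_p**2) - 1/2.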
+             else:
+                 kl_elementwise = kl_normal_mc(z, p_params, q_params)
+
+             all_dims = tuple(range(len(kl_elementwise.shape)))
+             kl_samplewise = kl_elementwise.sum(all_dims[1:])
+             kl_channelwise = kl_elementwise.sum(all_dims[2:])
+
+             # compute KL only on the portion of the latent space that is used
+             # for prediction
+             pad = (kl_elementwise.shape[-1] - self._vanilla_latent_hw) // 2
+             if pad > 0:
+                 tmp = kl_elementwise[..., pad:-pad, pad:-pad]
+                 kl_samplewise_restricted = tmp.sum(all_dims[1:])
+             else:
+                 kl_samplewise_restricted = kl_samplewise
+
+             # Compute spatial KL analytically (but conditioned on samples from
+             # previous layers)
+             kl_spatial = kl_elementwise.sum(1)
+         else:  # if predicting, no need to compute KL
+             kl_elementwise = kl_samplewise = kl_spatial = kl_channelwise = None
+
+         kl_dict = {
+             "kl_elementwise": kl_elementwise,  # (batch, ch, h, w)
+             "kl_samplewise": kl_samplewise,  # (batch, )
+             "kl_samplewise_restricted": kl_samplewise_restricted,  # (batch, )
+             "kl_spatial": kl_spatial,  # (batch, h, w)
+             "kl_channelwise": kl_channelwise,  # (batch, ch)
+         }  # TODO revisit, check dims
+         return kl_dict
+
+     def process_p_params(
+         self, p_params: torch.Tensor, var_clip_max: float
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.distributions.normal.Normal]:
+         """Process the input parameters to get the prior distribution p(z_i|z_{i+1}) (or p(z_L)).
+
+         Processing consists of:
+             - (optionally) a convolution on the input tensor to increase the
+               number of channels;
+             - splitting the resulting tensor into two chunks, the mean and the
+               log-variance;
+             - (optionally) clipping the log-variance to an upper threshold;
+             - defining the normal distribution p(z) given the parameter tensors above.
+
+         Parameters
+         ----------
+         p_params: torch.Tensor
+             The input tensor to be processed.
+         var_clip_max: float
+             The maximum value reachable by the log-variance of the latent
+             distribution. Values exceeding this threshold are clipped.
+         """
+         if self.transform_p_params:
+             p_params = self.conv_in_p(p_params)
+         else:
+             assert p_params.size(1) == 2 * self.c_vars
+
+         # Define p(z)
+         p_mu, p_lv = p_params.chunk(2, dim=1)
+         if var_clip_max is not None:
+             p_lv = torch.clip(p_lv, max=var_clip_max)
+
+         p_mu = StableMean(p_mu)
+         p_lv = StableLogVar(p_lv, enable_stable=not self._use_naive_exponential)
+         p = Normal(p_mu.get(), p_lv.get_std())
+         return p_mu, p_lv, p
+
+     def process_q_params(
+         self, q_params: torch.Tensor, var_clip_max: float, allow_oddsizes: bool = False
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.distributions.normal.Normal]:
+         """
+         Process the input parameters to get the inference distribution q(z_i|z_{i+1}) (or q(z|x)).
+
+         Processing consists of:
+             - a convolution on the input tensor to double the number of channels;
+             - splitting the resulting tensor into two chunks, the mean and the
+               log-variance;
+             - (optionally) clipping the log-variance to an upper threshold;
+             - (optionally) cropping the resulting tensors so that the last spatial
+               dimension is even;
+             - defining the normal distribution q(z) given the parameter tensors above.
+
+         Parameters
+         ----------
+         q_params: torch.Tensor
+             The input tensor to be processed.
+         var_clip_max: float
+             The maximum value reachable by the log-variance of the latent
+             distribution. Values exceeding this threshold are clipped.
+         allow_oddsizes: bool, optional
+             If `True`, tensors with an odd last spatial dimension are not cropped.
+             Default is `False`.
+         """
+         q_params = self.conv_in_q(q_params)
+
+         q_mu, q_lv = q_params.chunk(2, dim=1)
+         if var_clip_max is not None:
+             q_lv = torch.clip(q_lv, max=var_clip_max)
+
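+         # Crop odd-sized tensors to an even last spatial dimension
+         # (assumption: this keeps shapes compatible with the factor-2
+         # up-/downsampling used elsewhere in the LVAE).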
+         if q_mu.shape[-1] % 2 == 1 and allow_oddsizes is False:
+             q_mu = F.center_crop(q_mu, q_mu.shape[-1] - 1)
+             q_lv = F.center_crop(q_lv, q_lv.shape[-1] - 1)
+             # TODO revisit ?!
+         q_mu = StableMean(q_mu)
+         q_lv = StableLogVar(q_lv, enable_stable=not self._use_naive_exponential)
+         q = Normal(q_mu.get(), q_lv.get_std())
+         return q_mu, q_lv, q
+
+     def forward(
+         self,
+         p_params: torch.Tensor,
+         q_params: Union[torch.Tensor, None] = None,
+         forced_latent: Union[torch.Tensor, None] = None,
+         force_constant_output: bool = False,
+         analytical_kl: bool = False,
+         mode_pred: bool = False,
+         use_uncond_mode: bool = False,
+         var_clip_max: Union[float, None] = None,
+     ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
+         """
+         Parameters
+         ----------
+         p_params: torch.Tensor
+             The output tensor of the top-down layer above (i.e., mu_{p,i+1}, sigma_{p,i+1}).
+         q_params: torch.Tensor, optional
+             The tensor resulting from merging the bu_value tensor at the same
+             hierarchical level from the bottom-up pass and the `p_params` tensor.
+             Default is `None`.
+         forced_latent: torch.Tensor, optional
+             A pre-defined latent tensor. If it is not `None`, then it is used as
+             the actual latent tensor and, hence, sampling does not happen.
+             Default is `None`.
+         force_constant_output: bool, optional
+             Whether to copy the first sample (and its distribution parameters)
+             over the whole batch. This is used when sampling from the prior;
+             q is not used. Default is `False`.
+         analytical_kl: bool, optional
+             Whether to compute the KL divergence analytically or via Monte Carlo
+             estimation. Default is `False`.
+         mode_pred: bool, optional
+             Whether the model is in prediction mode. Default is `False`.
+         use_uncond_mode: bool, optional
+             Whether to use the unconditional distribution p(z) to sample latents
+             in prediction mode. Default is `False`.
+         var_clip_max: float, optional
+             The maximum value reachable by the log-variance of the latent
+             distribution. Values exceeding this threshold are clipped.
+             Default is `None`.
+         """
+         debug_qvar_max = 0
+
+         # Check sampling options consistency
+         assert forced_latent is None
+
+         # Get the generative distribution p(z_i|z_{i+1})
+         p_mu, p_lv, p = self.process_p_params(p_params, var_clip_max)
+         p_params = (p_mu, p_lv)
+
+         if q_params is not None:
+             # Get the inference distribution q(z_i|z_{i+1})
+             q_mu, q_lv, q = self.process_q_params(q_params, var_clip_max)
+             q_params = (q_mu, q_lv)
+             debug_qvar_max = torch.max(q_lv.get())
+             sampling_distrib = q
+             q_size = q_mu.get().shape[-1]
+             if p_mu.get().shape[-1] != q_size and mode_pred is False:
+                 p_mu.centercrop_to_size(q_size)
+                 p_lv.centercrop_to_size(q_size)
+         else:
+             sampling_distrib = p
+
+         # Sample the latent variable
+         z = self.get_z(sampling_distrib, forced_latent, mode_pred, use_uncond_mode)
+
+         # TODO: not necessary, remove
+         # Copy one sample (and its distribution parameters) over the whole batch.
+         # This is used when sampling from the prior; q is not used.
+         if force_constant_output:
+             z = z[0:1].expand_as(z).clone()
+             p_params = (
+                 p_params[0][0:1].expand_as(p_params[0]).clone(),
+                 p_params[1][0:1].expand_as(p_params[1]).clone(),
+             )
+
+         # Pass the sampled latent through the output convolution of the stochastic block
+         out = self.conv_out(z)
+
+         if q_params is not None:
+             # Compute log q(z)
+             logprob_q = q.log_prob(z).sum(tuple(range(1, z.dim())))
+             # Compute the KL divergence metrics
+             kl_dict = self.compute_kl_metrics(
+                 p, p_params, q, q_params, mode_pred, analytical_kl, z
+             )
+         else:
+             kl_dict = {}
+             logprob_q = None
+
+         # Store meaningful quantities for later computation
+         data = kl_dict
+         data["z"] = z  # sampled variable at this layer (B, C, [Z], Y, X)
+         data["p_params"] = p_params  # (B, C, [Z], Y, X) where B is 1 or batch size
+         data["q_params"] = q_params  # (B, C, [Z], Y, X)
+         data["logprob_q"] = logprob_q  # (B, )
+         data["qvar_max"] = debug_qvar_max
+         return out, data
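
For orientation, a minimal usage sketch of the block above. It is illustrative only: the shapes (batch 4, 64 input channels, 16x16 spatial size) and `vanilla_latent_hw=16` are arbitrary choices, and the import path assumes the file shown is careamics/models/lvae/stochastic.py.

    import torch

    from careamics.models.lvae.stochastic import NormalStochasticBlock

    # 64 input channels, 32 latent channels, 64 output channels.
    block = NormalStochasticBlock(
        c_in=64,
        c_vars=32,
        c_out=64,
        conv_dims=2,
        vanilla_latent_hw=16,  # needed in training mode for the restricted KL
    )

    # Prior parameters from the layer above and merged inference parameters.
    p_params = torch.randn(4, 64, 16, 16)
    q_params = torch.randn(4, 64, 16, 16)

    out, data = block(
        p_params, q_params=q_params, analytical_kl=True, var_clip_max=20.0
    )
    print(out.shape)                    # torch.Size([4, 64, 16, 16])
    print(data["z"].shape)              # torch.Size([4, 32, 16, 16])
    print(data["kl_samplewise"].shape)  # torch.Size([4])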