monai-weekly 1.4.dev2428__py3-none-any.whl → 1.4.dev2430__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. monai/__init__.py +1 -1
  2. monai/_version.py +3 -3
  3. monai/apps/auto3dseg/hpo_gen.py +1 -1
  4. monai/apps/detection/utils/anchor_utils.py +2 -2
  5. monai/apps/pathology/transforms/post/array.py +7 -4
  6. monai/auto3dseg/analyzer.py +1 -1
  7. monai/bundle/scripts.py +204 -22
  8. monai/bundle/utils.py +1 -0
  9. monai/data/dataset_summary.py +1 -0
  10. monai/data/meta_tensor.py +2 -2
  11. monai/data/test_time_augmentation.py +2 -0
  12. monai/data/utils.py +9 -6
  13. monai/data/wsi_reader.py +2 -2
  14. monai/engines/__init__.py +3 -1
  15. monai/engines/trainer.py +281 -2
  16. monai/engines/utils.py +76 -1
  17. monai/handlers/mlflow_handler.py +21 -4
  18. monai/inferers/__init__.py +5 -0
  19. monai/inferers/inferer.py +1279 -1
  20. monai/metrics/cumulative_average.py +2 -0
  21. monai/metrics/panoptic_quality.py +1 -1
  22. monai/metrics/rocauc.py +2 -2
  23. monai/networks/blocks/__init__.py +3 -0
  24. monai/networks/blocks/attention_utils.py +128 -0
  25. monai/networks/blocks/crossattention.py +168 -0
  26. monai/networks/blocks/rel_pos_embedding.py +56 -0
  27. monai/networks/blocks/selfattention.py +74 -5
  28. monai/networks/blocks/spade_norm.py +95 -0
  29. monai/networks/blocks/spatialattention.py +82 -0
  30. monai/networks/blocks/transformerblock.py +25 -4
  31. monai/networks/blocks/upsample.py +22 -10
  32. monai/networks/layers/__init__.py +2 -1
  33. monai/networks/layers/factories.py +12 -1
  34. monai/networks/layers/simplelayers.py +1 -1
  35. monai/networks/layers/utils.py +14 -1
  36. monai/networks/layers/vector_quantizer.py +233 -0
  37. monai/networks/nets/__init__.py +9 -0
  38. monai/networks/nets/autoencoderkl.py +702 -0
  39. monai/networks/nets/controlnet.py +465 -0
  40. monai/networks/nets/diffusion_model_unet.py +1913 -0
  41. monai/networks/nets/patchgan_discriminator.py +230 -0
  42. monai/networks/nets/quicknat.py +8 -6
  43. monai/networks/nets/resnet.py +3 -4
  44. monai/networks/nets/spade_autoencoderkl.py +480 -0
  45. monai/networks/nets/spade_diffusion_model_unet.py +934 -0
  46. monai/networks/nets/spade_network.py +435 -0
  47. monai/networks/nets/swin_unetr.py +4 -3
  48. monai/networks/nets/transformer.py +157 -0
  49. monai/networks/nets/vqvae.py +472 -0
  50. monai/networks/schedulers/__init__.py +17 -0
  51. monai/networks/schedulers/ddim.py +294 -0
  52. monai/networks/schedulers/ddpm.py +250 -0
  53. monai/networks/schedulers/pndm.py +316 -0
  54. monai/networks/schedulers/scheduler.py +205 -0
  55. monai/networks/utils.py +22 -0
  56. monai/transforms/croppad/array.py +8 -8
  57. monai/transforms/croppad/dictionary.py +4 -4
  58. monai/transforms/croppad/functional.py +1 -1
  59. monai/transforms/regularization/array.py +4 -0
  60. monai/transforms/spatial/array.py +1 -1
  61. monai/transforms/utils_create_transform_ims.py +2 -4
  62. monai/utils/__init__.py +1 -0
  63. monai/utils/misc.py +5 -4
  64. monai/utils/ordering.py +207 -0
  65. monai/visualize/class_activation_maps.py +5 -5
  66. monai/visualize/img2tensorboard.py +3 -1
  67. {monai_weekly-1.4.dev2428.dist-info → monai_weekly-1.4.dev2430.dist-info}/METADATA +1 -1
  68. {monai_weekly-1.4.dev2428.dist-info → monai_weekly-1.4.dev2430.dist-info}/RECORD +71 -50
  69. {monai_weekly-1.4.dev2428.dist-info → monai_weekly-1.4.dev2430.dist-info}/WHEEL +1 -1
  70. {monai_weekly-1.4.dev2428.dist-info → monai_weekly-1.4.dev2430.dist-info}/LICENSE +0 -0
  71. {monai_weekly-1.4.dev2428.dist-info → monai_weekly-1.4.dev2430.dist-info}/top_level.txt +0 -0
monai/networks/nets/patchgan_discriminator.py (new file)
@@ -0,0 +1,230 @@
+ # Copyright (c) MONAI Consortium
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import annotations
+
+ from collections.abc import Sequence
+
+ import torch
+ import torch.nn as nn
+
+ from monai.networks.blocks import Convolution
+ from monai.networks.layers import Act
+ from monai.networks.utils import normal_init
+
+
+ class MultiScalePatchDiscriminator(nn.Sequential):
+     """
+     Multi-scale Patch-GAN discriminator based on Pix2PixHD:
+     High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs (https://arxiv.org/abs/1711.11585)
+
+     The multi-scale discriminator is made up of several PatchGAN discriminators that process the image
+     at different spatial scales.
+
+     Args:
+         num_d: number of discriminators
+         num_layers_d: number of Convolution layers (Conv + activation + normalisation + [dropout]) in the first
+             discriminator. Each subsequent discriminator has one additional layer, meaning the output size is halved.
+         spatial_dims: number of spatial dimensions (1D, 2D etc.)
+         channels: number of filters in the first convolutional layer (doubled for each subsequent layer)
+         in_channels: number of input channels
+         out_channels: number of output channels in each discriminator
+         kernel_size: kernel size of the convolution layers
+         activation: activation layer type
+         norm: normalisation type
+         bias: whether to include a bias term in the convolution layers
+         dropout: probability of dropout applied, defaults to 0.
+         minimum_size_im: minimum spatial size of the input image. Introduced to make sure the architecture
+             requested isn't going to downsample the input image below a size of 1.
+         last_conv_kernel_size: kernel size of the last convolutional layer.
+     """
+
+     def __init__(
+         self,
+         num_d: int,
+         num_layers_d: int,
+         spatial_dims: int,
+         channels: int,
+         in_channels: int,
+         out_channels: int = 1,
+         kernel_size: int = 4,
+         activation: str | tuple = (Act.LEAKYRELU, {"negative_slope": 0.2}),
+         norm: str | tuple = "BATCH",
+         bias: bool = False,
+         dropout: float | tuple = 0.0,
+         minimum_size_im: int = 256,
+         last_conv_kernel_size: int = 1,
+     ) -> None:
+         super().__init__()
+         self.num_d = num_d
+         self.num_layers_d = num_layers_d
+         self.num_channels = channels
+         self.padding = tuple([int((kernel_size - 1) / 2)] * spatial_dims)
+         for i_ in range(self.num_d):
+             num_layers_d_i = self.num_layers_d * (i_ + 1)
+             output_size = float(minimum_size_im) / (2**num_layers_d_i)
+             if output_size < 1:
+                 raise AssertionError(
+                     f"Your image size is too small to take in up to {i_} discriminators with num_layers = {num_layers_d_i}. "
+                     "Please reduce num_layers_d, reduce num_d or enter bigger images."
+                 )
+             subnet_d = PatchDiscriminator(
+                 spatial_dims=spatial_dims,
+                 channels=self.num_channels,
+                 in_channels=in_channels,
+                 out_channels=out_channels,
+                 num_layers_d=num_layers_d_i,
+                 kernel_size=kernel_size,
+                 activation=activation,
+                 norm=norm,
+                 bias=bias,
+                 padding=self.padding,
+                 dropout=dropout,
+                 last_conv_kernel_size=last_conv_kernel_size,
+             )
+
+             self.add_module("discriminator_%d" % i_, subnet_d)
+
+     def forward(self, i: torch.Tensor) -> tuple[list[torch.Tensor], list[list[torch.Tensor]]]:
+         """
+         Args:
+             i: input tensor
+
+         Returns:
+             list of outputs and another list of lists with the intermediate features
+             of each discriminator.
+         """
+
+         out: list[torch.Tensor] = []
+         intermediate_features: list[list[torch.Tensor]] = []
+         for disc in self.children():
+             out_d: list[torch.Tensor] = disc(i)
+             out.append(out_d[-1])
+             intermediate_features.append(out_d[:-1])
+
+         return out, intermediate_features
+
+
+ class PatchDiscriminator(nn.Sequential):
+     """
+     Patch-GAN discriminator based on Pix2PixHD:
+     High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs (https://arxiv.org/abs/1711.11585)
+
+     Args:
+         spatial_dims: number of spatial dimensions (1D, 2D etc.)
+         channels: number of filters in the first convolutional layer (doubled for each subsequent layer)
+         in_channels: number of input channels
+         out_channels: number of output channels
+         num_layers_d: number of Convolution layers (Conv + activation + normalisation + [dropout]) in the discriminator.
+         kernel_size: kernel size of the convolution layers
+         activation: activation type and arguments. Defaults to LeakyReLU.
+         norm: feature normalization type and arguments. Defaults to batch norm.
+         bias: whether to have a bias term in convolution blocks. Defaults to False.
+         padding: padding to be applied to the convolutional layers
+         dropout: proportion of dropout applied, defaults to 0.
+         last_conv_kernel_size: kernel size of the last convolutional layer.
+     """
+
+     def __init__(
+         self,
+         spatial_dims: int,
+         channels: int,
+         in_channels: int,
+         out_channels: int = 1,
+         num_layers_d: int = 3,
+         kernel_size: int = 4,
+         activation: str | tuple = (Act.LEAKYRELU, {"negative_slope": 0.2}),
+         norm: str | tuple = "BATCH",
+         bias: bool = False,
+         padding: int | Sequence[int] = 1,
+         dropout: float | tuple = 0.0,
+         last_conv_kernel_size: int | None = None,
+     ) -> None:
+         super().__init__()
+         self.num_layers_d = num_layers_d
+         self.num_channels = channels
+         if last_conv_kernel_size is None:
+             last_conv_kernel_size = kernel_size
+
+         self.add_module(
+             "initial_conv",
+             Convolution(
+                 spatial_dims=spatial_dims,
+                 kernel_size=kernel_size,
+                 in_channels=in_channels,
+                 out_channels=channels,
+                 act=activation,
+                 bias=True,
+                 norm=None,
+                 dropout=dropout,
+                 padding=padding,
+                 strides=2,
+             ),
+         )
+
+         input_channels = channels
+         output_channels = channels * 2
+
+         # Intermediate layers
+         for l_ in range(self.num_layers_d):
+             if l_ == self.num_layers_d - 1:
+                 stride = 1
+             else:
+                 stride = 2
+             layer = Convolution(
+                 spatial_dims=spatial_dims,
+                 kernel_size=kernel_size,
+                 in_channels=input_channels,
+                 out_channels=output_channels,
+                 act=activation,
+                 bias=bias,
+                 norm=norm,
+                 dropout=dropout,
+                 padding=padding,
+                 strides=stride,
+             )
+             self.add_module("%d" % l_, layer)
+             input_channels = output_channels
+             output_channels = output_channels * 2
+
+         # Final layer
+         self.add_module(
+             "final_conv",
+             Convolution(
+                 spatial_dims=spatial_dims,
+                 kernel_size=last_conv_kernel_size,
+                 in_channels=input_channels,
+                 out_channels=out_channels,
+                 bias=True,
+                 conv_only=True,
+                 padding=int((last_conv_kernel_size - 1) / 2),
+                 dropout=0.0,
+                 strides=1,
+             ),
+         )
+
+         self.apply(normal_init)
+
+     def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
+         """
+         Args:
+             x: input tensor
+
+         Returns:
+             list of intermediate features, with the last element being the output.
+         """
+         out = [x]
+         for submodel in self.children():
+             intermediate_output = submodel(out[-1])
+             out.append(intermediate_output)
+
+         return out[1:]
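
For orientation, a minimal usage sketch of the two classes added above (a sketch only: the import path follows the file location in this diff, and the shapes assume a 2D 256x256 single-channel input):

import torch

from monai.networks.nets.patchgan_discriminator import MultiScalePatchDiscriminator

# Two sub-discriminators: the first has 3 conv layers, the second 6,
# so the second judges the image at a coarser effective scale.
net = MultiScalePatchDiscriminator(
    num_d=2, num_layers_d=3, spatial_dims=2, channels=16, in_channels=1, minimum_size_im=256
)

x = torch.rand(4, 1, 256, 256)  # batch of 4 single-channel 256x256 images
outputs, features = net(x)

print(len(outputs))                    # 2: one prediction map per discriminator
print([f.shape for f in features[0]])  # intermediate feature maps of the first
                                       # discriminator, e.g. for feature-matching losses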
monai/networks/nets/quicknat.py
@@ -42,7 +42,7 @@ class SkipConnectionWithIdx(SkipConnection):
     Inherits from SkipConnection but provides the indices with each forward pass.
     """

-     def forward(self, input, indices):
+     def forward(self, input, indices):  # type: ignore[override]
          return super().forward(input), indices


@@ -57,7 +57,7 @@ class SequentialWithIdx(nn.Sequential):
     def __init__(self, *args):
         super().__init__(*args)

-     def forward(self, input, indices):
+     def forward(self, input, indices):  # type: ignore[override]
          for module in self:
              input, indices = module(input, indices)
          return input, indices
@@ -165,9 +165,11 @@ class ConvConcatDenseBlock(ConvDenseBlock):
         )
         return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv"))

-     def forward(self, input, _):
+     def forward(self, input, _):  # type: ignore[override]
          i = 0
          result = input
+         result1 = input  # this will not stay this value, needed here for pylint/mypy
+
          for l in self.children():
              # ignoring the max (un-)pool and dropout already added in the initialization step
              if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)):
@@ -213,7 +215,7 @@ class Encoder(ConvConcatDenseBlock):
         super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters)
         self.max_pool = max_pool

-     def forward(self, input, indices=None):
+     def forward(self, input, indices=None):  # type: ignore[override]
          input, indices = self.max_pool(input)

         out_block, _ = super().forward(input, None)
@@ -241,7 +243,7 @@ class Decoder(ConvConcatDenseBlock):
         super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters)
         self.un_pool = un_pool

-     def forward(self, input, indices):
+     def forward(self, input, indices):  # type: ignore[override]
          out_block, _ = super().forward(input, None)
          out_block = self.un_pool(out_block, indices)
          return out_block, None
@@ -268,7 +270,7 @@ class Bottleneck(ConvConcatDenseBlock):
         self.max_pool = max_pool
         self.un_pool = un_pool

-     def forward(self, input, indices):
+     def forward(self, input, indices):  # type: ignore[override]
          out_block, indices = self.max_pool(input)
          out_block, _ = super().forward(out_block, None)
          out_block = self.un_pool(out_block, indices)
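
The "# type: ignore[override]" annotations above are needed because these forward methods thread an (input, indices) pair through modules whose base classes declare a single-argument forward. The indices are produced by max-pooling in the Encoder and consumed by unpooling in the Decoder; an illustrative plain-PyTorch sketch of that convention (not the actual MONAI classes):

import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=2, return_indices=True)  # Encoder side
unpool = nn.MaxUnpool2d(kernel_size=2)                   # Decoder side

x = torch.rand(1, 4, 16, 16)
pooled, indices = pool(x)           # indices record which elements were kept
restored = unpool(pooled, indices)  # unpooling scatters them back in place
print(restored.shape)               # torch.Size([1, 4, 16, 16])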
monai/networks/nets/resnet.py
@@ -510,7 +510,7 @@ def _resnet(
         # Check model bias_downsample and shortcut_type
         bias_downsample, shortcut_type = get_medicalnet_pretrained_resnet_args(resnet_depth)
         if shortcut_type == kwargs.get("shortcut_type", "B") and (
-             bool(bias_downsample) == kwargs.get("bias_downsample", False) if bias_downsample != -1 else True
+             bias_downsample == kwargs.get("bias_downsample", True)
         ):
             # Download the MedicalNet pretrained model
             model_state_dict = get_pretrained_resnet_medicalnet(
@@ -518,8 +518,7 @@ def _resnet(
             )
         else:
             raise NotImplementedError(
-                 f"Please set shortcut_type to {shortcut_type} and bias_downsample to"
-                 f"{bool(bias_downsample) if bias_downsample!=-1 else 'True or False'}"
+                 f"Please set shortcut_type to {shortcut_type} and bias_downsample to {bias_downsample} "
                 f"when using pretrained MedicalNet resnet{resnet_depth}"
             )
     else:
@@ -681,7 +680,7 @@ def get_medicalnet_pretrained_resnet_args(resnet_depth: int):
     # After testing
     # False: 10, 50, 101, 152, 200
     # Any: 18, 34
-     bias_downsample = -1 if resnet_depth in [18, 34] else 0  # 18, 10, 34
+     bias_downsample = resnet_depth in (18, 34)
     shortcut_type = "A" if resnet_depth in [18, 34] else "B"
     return bias_downsample, shortcut_type
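
The net effect of the last two hunks: bias_downsample changes from a -1/0 tri-state integer to a plain bool, and the comparison in _resnet uses it directly (with the kwargs default flipped to True). A quick sketch of the rewritten helper's behaviour, mirroring the diff above:

def get_medicalnet_pretrained_resnet_args(resnet_depth: int):
    # bias_downsample is now a plain bool rather than the old -1/0 tri-state
    bias_downsample = resnet_depth in (18, 34)
    shortcut_type = "A" if resnet_depth in [18, 34] else "B"
    return bias_downsample, shortcut_type

print(get_medicalnet_pretrained_resnet_args(18))  # (True, 'A')
print(get_medicalnet_pretrained_resnet_args(50))  # (False, 'B')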