monai-weekly 1.4.dev2434__py3-none-any.whl → 1.4.dev2436__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. monai/__init__.py +44 -2
  2. monai/_version.py +3 -3
  3. monai/apps/vista3d/inferer.py +177 -0
  4. monai/apps/vista3d/sampler.py +179 -0
  5. monai/apps/vista3d/transforms.py +224 -0
  6. monai/bundle/config_parser.py +5 -3
  7. monai/bundle/scripts.py +2 -2
  8. monai/bundle/utils.py +35 -1
  9. monai/handlers/__init__.py +1 -0
  10. monai/handlers/trt_handler.py +61 -0
  11. monai/inferers/utils.py +1 -0
  12. monai/metrics/generalized_dice.py +77 -48
  13. monai/networks/__init__.py +2 -0
  14. monai/networks/layers/filtering.py +6 -2
  15. monai/networks/nets/swin_unetr.py +4 -4
  16. monai/networks/nets/vista3d.py +53 -11
  17. monai/networks/trt_compiler.py +569 -0
  18. monai/networks/utils.py +225 -41
  19. monai/transforms/__init__.py +24 -2
  20. monai/transforms/io/array.py +58 -2
  21. monai/transforms/io/dictionary.py +29 -2
  22. monai/transforms/spatial/array.py +44 -0
  23. monai/transforms/spatial/dictionary.py +61 -0
  24. monai/transforms/spatial/functional.py +70 -1
  25. monai/transforms/utility/array.py +153 -4
  26. monai/transforms/utility/dictionary.py +105 -3
  27. monai/transforms/utils.py +83 -10
  28. monai/utils/__init__.py +1 -0
  29. monai/utils/enums.py +1 -0
  30. monai/utils/type_conversion.py +8 -0
  31. {monai_weekly-1.4.dev2434.dist-info → monai_weekly-1.4.dev2436.dist-info}/METADATA +4 -1
  32. {monai_weekly-1.4.dev2434.dist-info → monai_weekly-1.4.dev2436.dist-info}/RECORD +36 -31
  33. {monai_weekly-1.4.dev2434.dist-info → monai_weekly-1.4.dev2436.dist-info}/WHEEL +1 -1
  34. /monai/apps/{generation/maisi/utils → vista3d}/__init__.py +0 -0
  35. {monai_weekly-1.4.dev2434.dist-info → monai_weekly-1.4.dev2436.dist-info}/LICENSE +0 -0
  36. {monai_weekly-1.4.dev2434.dist-info → monai_weekly-1.4.dev2436.dist-info}/top_level.txt +0 -0
monai/__init__.py CHANGED
@@ -13,9 +13,51 @@ from __future__ import annotations
 
 import os
 import sys
-
+import logging
+import warnings
 from ._version import get_versions
 
+
+old_showwarning = warnings.showwarning
+
+
+def custom_warning_handler(message, category, filename, lineno, file=None, line=None):
+    ignore_files = ["ignite/handlers/checkpoint", "modelopt/torch/quantization/tensor_quant"]
+    if any(ignore in filename for ignore in ignore_files):
+        return
+    old_showwarning(message, category, filename, lineno, file, line)
+
+
+class DeprecatedTypesWarningFilter(logging.Filter):
+    def filter(self, record):
+        message_bodies_to_ignore = [
+            "np.bool8",
+            "np.object0",
+            "np.int0",
+            "np.uint0",
+            "np.void0",
+            "np.str0",
+            "np.bytes0",
+            "@validator",
+            "@root_validator",
+            "class-based `config`",
+            "pkg_resources",
+            "Implicitly cleaning up",
+        ]
+        for message in message_bodies_to_ignore:
+            if message in record.getMessage():
+                return False
+        return True
+
+
+# workaround for https://github.com/Project-MONAI/MONAI/issues/8060
+# TODO: remove this workaround after upstream fixed the warning
+# Set the custom warning handler to filter warning
+warnings.showwarning = custom_warning_handler
+# Get the logger for warnings and add the filter to the logger
+logging.getLogger("py.warnings").addFilter(DeprecatedTypesWarningFilter())
+
+
 PY_REQUIRED_MAJOR = 3
 PY_REQUIRED_MINOR = 9
 
@@ -93,4 +135,4 @@ except BaseException:
 
     if MONAIEnvVars.debug():
         raise
-__commit_id__ = "a5fbe716378948630783deef8ee435e7e3bdc918"
+__commit_id__ = "d02ba11d8069870d71316a616f047c499627c71c"
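Note: the warning plumbing added above is a generic pattern — wrap warnings.showwarning to drop warnings by source file, and attach a logging.Filter to the "py.warnings" logger to drop captured warnings by message body. A minimal standalone sketch of the wrapper half (the filtered module path is illustrative, not MONAI API):

import warnings

_old_showwarning = warnings.showwarning

def _quiet_showwarning(message, category, filename, lineno, file=None, line=None):
    # suppress warnings raised from a selected third-party module; pass everything else through
    if "noisy_dependency/module" in filename:  # hypothetical module path
        return
    _old_showwarning(message, category, filename, lineno, file, line)

warnings.showwarning = _quiet_showwarning
warnings.warn("this one still prints")  # not from the ignored module, so it is shown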
monai/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2024-08-25T02:21:56+0000",
+ "date": "2024-09-08T02:25:56+0000",
  "dirty": false,
  "error": null,
- "full-revisionid": "dc611d231ba670004b1da1b011fe140375fb91af",
- "version": "1.4.dev2434"
+ "full-revisionid": "0d9ab7da5ba0cbc2df3de3f7397c58ac1fe80598",
+ "version": "1.4.dev2436"
 }
 ''' # END VERSION_JSON
 
monai/apps/vista3d/inferer.py ADDED
@@ -0,0 +1,177 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import copy
+from collections.abc import Sequence
+from typing import Any
+
+import torch
+
+from monai.data.meta_tensor import MetaTensor
+from monai.utils import optional_import
+
+tqdm, _ = optional_import("tqdm", name="tqdm")
+
+__all__ = ["point_based_window_inferer"]
+
+
+def point_based_window_inferer(
+    inputs: torch.Tensor | MetaTensor,
+    roi_size: Sequence[int],
+    predictor: torch.nn.Module,
+    point_coords: torch.Tensor,
+    point_labels: torch.Tensor,
+    class_vector: torch.Tensor | None = None,
+    prompt_class: torch.Tensor | None = None,
+    prev_mask: torch.Tensor | MetaTensor | None = None,
+    point_start: int = 0,
+    center_only: bool = True,
+    margin: int = 5,
+    **kwargs: Any,
+) -> torch.Tensor:
+    """
+    Point-based window inferer that takes an input image, a set of points, and a model, and returns a segmented image.
+    The inferer crops the input image into patches centered at the given points, runs inference on each patch,
+    stitches the patch outputs together by averaging, and returns the segmented mask.
+
+    Args:
+        inputs: [1CHWD], input image to be processed.
+        roi_size: the spatial window size for inference. When a component of `roi_size` is None or
+            non-positive, the corresponding dimension of the input image is used instead. For example,
+            `roi_size=(32, -1)` is adapted to `(32, 64)` if the second spatial dimension of the image is `64`.
+        predictor: the model. For VISTA3D, the output is [B, 1, H, W, D], which needs to be transposed
+            to [1, B, H, W, D]; add transpose=True in kwargs for VISTA3D.
+        point_coords: [B, N, 3]. Point coordinates for B foreground objects, each with N points.
+        point_labels: [B, N]. Point labels. 0/1 means negative/positive points for regular supported or
+            zero-shot classes. 2/3 means negative/positive points for special supported classes
+            (e.g. tumor, vessel).
+        class_vector: [B]. Used for class-head automatic segmentation. Can be None.
+        prompt_class: [B]. The same as class_vector; it represents the point class and informs the point
+            head whether the class is supported or zero-shot. Not used for automatic segmentation. If None,
+            the point head defaults to supported-class segmentation.
+        prev_mask: [1, B, H, W, D]. The value is before sigmoid. An optional tensor of previously segmented masks.
+        point_start: only use points starting from this index. All points before this index are assumed to
+            have generated prev_mask; this avoids re-computing points from previous iterations when
+            prev_mask is given.
+        center_only: for each point, only crop the patch centered at that point. If False, crop three
+            patches for each point.
+        margin: the distance from the point to the patch boundary, used when center_only is False.
+
+    Returns:
+        stitched_output: [1, B, H, W, D]. The value is before sigmoid.
+
+    Notice: the function only supports SINGLE OBJECT INFERENCE with B=1.
+    """
+    if not point_coords.shape[0] == 1:
+        raise ValueError("Only supports single object point click.")
+    if not len(inputs.shape) == 5:
+        raise ValueError("Input image should be 5D.")
+    image, pad = _pad_previous_mask(copy.deepcopy(inputs), roi_size)
+    point_coords = point_coords + torch.tensor([pad[-2], pad[-4], pad[-6]]).to(point_coords.device)
+    prev_mask = _pad_previous_mask(copy.deepcopy(prev_mask), roi_size)[0] if prev_mask is not None else None
+    stitched_output = None
+    for p in point_coords[0][point_start:]:
+        lx_, rx_ = _get_window_idx(p[0], roi_size[0], image.shape[-3], center_only=center_only, margin=margin)
+        ly_, ry_ = _get_window_idx(p[1], roi_size[1], image.shape[-2], center_only=center_only, margin=margin)
+        lz_, rz_ = _get_window_idx(p[2], roi_size[2], image.shape[-1], center_only=center_only, margin=margin)
+        for i in range(len(lx_)):
+            for j in range(len(ly_)):
+                for k in range(len(lz_)):
+                    lx, rx, ly, ry, lz, rz = (lx_[i], rx_[i], ly_[j], ry_[j], lz_[k], rz_[k])
+                    unravel_slice = [
+                        slice(None),
+                        slice(None),
+                        slice(int(lx), int(rx)),
+                        slice(int(ly), int(ry)),
+                        slice(int(lz), int(rz)),
+                    ]
+                    batch_image = image[unravel_slice]
+                    output = predictor(
+                        batch_image,
+                        point_coords=point_coords,
+                        point_labels=point_labels,
+                        class_vector=class_vector,
+                        prompt_class=prompt_class,
+                        patch_coords=[unravel_slice],
+                        prev_mask=prev_mask,
+                        **kwargs,
+                    )
+                    if stitched_output is None:
+                        stitched_output = torch.zeros(
+                            [1, output.shape[1], image.shape[-3], image.shape[-2], image.shape[-1]], device="cpu"
+                        )
+                        stitched_mask = torch.zeros(
+                            [1, output.shape[1], image.shape[-3], image.shape[-2], image.shape[-1]], device="cpu"
+                        )
+                    stitched_output[unravel_slice] += output.to("cpu")
+                    stitched_mask[unravel_slice] = 1
+    # if stitched_mask is 0, then NaN value
+    stitched_output = stitched_output / stitched_mask
+    # revert padding
+    stitched_output = stitched_output[
+        :, :, pad[4] : image.shape[-3] - pad[5], pad[2] : image.shape[-2] - pad[3], pad[0] : image.shape[-1] - pad[1]
+    ]
+    stitched_mask = stitched_mask[
+        :, :, pad[4] : image.shape[-3] - pad[5], pad[2] : image.shape[-2] - pad[3], pad[0] : image.shape[-1] - pad[1]
+    ]
+    if prev_mask is not None:
+        prev_mask = prev_mask[
+            :,
+            :,
+            pad[4] : image.shape[-3] - pad[5],
+            pad[2] : image.shape[-2] - pad[3],
+            pad[0] : image.shape[-1] - pad[1],
+        ]
+        prev_mask = prev_mask.to("cpu")  # type: ignore
+        # for un-calculated place, use previous mask
+        stitched_output[stitched_mask < 1] = prev_mask[stitched_mask < 1]
+    if isinstance(inputs, torch.Tensor):
+        inputs = MetaTensor(inputs)
+    if not hasattr(stitched_output, "meta"):
+        stitched_output = MetaTensor(stitched_output, affine=inputs.meta["affine"], meta=inputs.meta)
+    return stitched_output
+
+
+def _get_window_idx_c(p: int, roi: int, s: int) -> tuple[int, int]:
+    """Helper function to get the window index."""
+    if p - roi // 2 < 0:
+        left, right = 0, roi
+    elif p + roi // 2 > s:
+        left, right = s - roi, s
+    else:
+        left, right = int(p) - roi // 2, int(p) + roi // 2
+    return left, right
+
+
+def _get_window_idx(p: int, roi: int, s: int, center_only: bool = True, margin: int = 5) -> tuple[list[int], list[int]]:
+    """Get the window index."""
+    left, right = _get_window_idx_c(p, roi, s)
+    if center_only:
+        return [left], [right]
+    left_most = max(0, p - roi + margin)
+    right_most = min(s, p + roi - margin)
+    left_list = [left_most, right_most - roi, left]
+    right_list = [left_most + roi, right_most, right]
+    return left_list, right_list
+
+
+def _pad_previous_mask(
+    inputs: torch.Tensor | MetaTensor, roi_size: Sequence[int], padvalue: int = 0
+) -> tuple[torch.Tensor | MetaTensor, list[int]]:
+    """Helper function to pad inputs."""
+    pad_size = []
+    for k in range(len(inputs.shape) - 1, 1, -1):
+        diff = max(roi_size[k - 2] - inputs.shape[k], 0)
+        half = diff // 2
+        pad_size.extend([half, diff - half])
+    if any(pad_size):
+        inputs = torch.nn.functional.pad(inputs, pad=pad_size, mode="constant", value=padvalue)  # type: ignore
+    return inputs, pad_size
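For orientation, here is a minimal usage sketch of the new inferer. The predictor below is a stand-in with the keyword signature the inferer passes through, not a real VISTA3D network, and the shapes are illustrative:

import torch
from monai.apps.vista3d.inferer import point_based_window_inferer

class DummyPredictor(torch.nn.Module):
    # stand-in for a VISTA3D-style network: emits [1, B, h, w, d] logits per patch
    def forward(self, x, point_coords=None, point_labels=None, class_vector=None,
                prompt_class=None, patch_coords=None, prev_mask=None, **kwargs):
        return torch.randn(1, 1, *x.shape[2:])

image = torch.rand(1, 1, 128, 128, 128)          # [1, C, H, W, D]
points = torch.tensor([[[64, 64, 64]]]).float()  # [B=1, N=1, 3], a single click
point_labels = torch.tensor([[1]])               # 1 = positive click

logits = point_based_window_inferer(
    inputs=image, roi_size=(96, 96, 96), predictor=DummyPredictor(),
    point_coords=points, point_labels=point_labels,
)
mask = logits.sigmoid() > 0.5  # output is pre-sigmoid, [1, B, H, W, D]; voxels outside all patches are NaN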
monai/apps/vista3d/sampler.py ADDED
@@ -0,0 +1,179 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import copy
+import random
+from collections.abc import Callable, Sequence
+from typing import Any
+
+import numpy as np
+import torch
+from torch import Tensor
+
+ENABLE_SPECIAL = True
+SPECIAL_INDEX = (23, 24, 25, 26, 27, 57, 128)
+MERGE_LIST = {
+    1: [25, 26],  # hepatic tumor and vessel merge into liver
+    4: [24],  # pancreatic tumor merge into pancreas
+    132: [57],  # overlap with trachea merge into airway
+}
+
+__all__ = ["sample_prompt_pairs"]
+
+
+def _get_point_label(id: int) -> tuple[int, int]:
+    if id in SPECIAL_INDEX and ENABLE_SPECIAL:
+        return 2, 3
+    else:
+        return 0, 1
+
+
+def sample_prompt_pairs(
+    labels: Tensor,
+    label_set: Sequence[int],
+    max_prompt: int | None = None,
+    max_foreprompt: int | None = None,
+    max_backprompt: int = 1,
+    max_point: int = 20,
+    include_background: bool = False,
+    drop_label_prob: float = 0.2,
+    drop_point_prob: float = 0.2,
+    point_sampler: Callable | None = None,
+    **point_sampler_kwargs: Any,
+) -> tuple[Tensor | None, Tensor | None, Tensor | None, Tensor | None]:
+    """
+    Sample training pairs for VISTA3D training.
+
+    Args:
+        labels: [1, 1, H, W, D], ground truth labels.
+        label_set: the label list for the specific dataset. Note that if 0 is included in label_set,
+            it will be added into automatic branch training. It is recommended to remove 0 from label_set
+            when training on multiple partially labeled datasets, and to add 0 when finetuning on a
+            specific dataset, because a region labeled 0 in one partially labeled dataset may contain
+            foreground of another dataset.
+        max_prompt: maximum number of total prompts, including foreground and background.
+        max_foreprompt: maximum number of prompts from foreground.
+        max_backprompt: maximum number of prompts from background.
+        max_point: maximum number of points for each object.
+        include_background: whether to include 0 in the training prompts. If included, background 0 is
+            treated the same as foreground and points will be sampled. Set to True only to segment
+            background 0 with point clicks; otherwise keep it False.
+        drop_label_prob: probability of dropping the label prompt.
+        drop_point_prob: probability of dropping the point prompt.
+        point_sampler: sampler to augment masks with supervoxels.
+        point_sampler_kwargs: arguments for point_sampler.
+
+    Returns:
+        tuple:
+            - label_prompt (Tensor | None): Tensor of shape [B, 1] containing the classes used for
+              training automatic segmentation.
+            - point (Tensor | None): Tensor of shape [B, N, 3] representing the corresponding points
+              for each class. Note that background label prompts require matching points as well
+              (e.g., [0, 0, 0] is used).
+            - point_label (Tensor | None): Tensor of shape [B, N] representing the corresponding point
+              labels for each point (negative or positive). -1 is used for padding the background
+              label prompt and will be ignored.
+            - prompt_class (Tensor | None): Tensor of shape [B, 1], exactly the same as label_prompt
+              for label indexing during training. If label_prompt is None, prompt_class is used to
+              identify point classes.
+    """
+    # class label number
+    if not labels.shape[0] == 1:
+        raise ValueError("only support batch size 1")
+    labels = labels[0, 0]
+    device = labels.device
+    unique_labels = labels.unique().cpu().numpy().tolist()
+    if include_background:
+        unique_labels = list(set(unique_labels) - (set(unique_labels) - set(label_set)))
+    else:
+        unique_labels = list(set(unique_labels) - (set(unique_labels) - set(label_set)) - {0})
+    background_labels = list(set(label_set) - set(unique_labels))
+    # during training, balance background and foreground prompts
+    if max_backprompt is not None:
+        if len(background_labels) > max_backprompt:
+            random.shuffle(background_labels)
+            background_labels = background_labels[:max_backprompt]
+
+    if max_foreprompt is not None:
+        if len(unique_labels) > max_foreprompt:
+            random.shuffle(unique_labels)
+            unique_labels = unique_labels[:max_foreprompt]
+
+    if max_prompt is not None:
+        if len(unique_labels) + len(background_labels) > max_prompt:
+            if len(unique_labels) > max_prompt:
+                unique_labels = random.sample(unique_labels, max_prompt)
+                background_labels = []
+            else:
+                background_labels = random.sample(background_labels, max_prompt - len(unique_labels))
+    _point = []
+    _point_label = []
+    # if use regular sampling
+    if point_sampler is None:
+        num_p = min(max_point, int(np.abs(random.gauss(mu=0, sigma=max_point // 2))) + 1)
+        num_n = min(max_point, int(np.abs(random.gauss(mu=0, sigma=max_point // 2))))
+        for id in unique_labels:
+            neg_id, pos_id = _get_point_label(id)
+            plabels = labels == int(id)
+            nlabels = ~plabels
+            plabelpoints = torch.nonzero(plabels)
+            nlabelpoints = torch.nonzero(nlabels)
+            # final sampled positive points
+            num_pa = min(len(plabelpoints), num_p)
+            # final sampled negative points
+            num_na = min(len(nlabelpoints), num_n)
+            _point.append(
+                torch.stack(
+                    random.choices(plabelpoints, k=num_pa)
+                    + random.choices(nlabelpoints, k=num_na)
+                    + [torch.tensor([0, 0, 0], device=device)] * (num_p + num_n - num_pa - num_na)
+                )
+            )
+            _point_label.append(
+                torch.tensor([pos_id] * num_pa + [neg_id] * num_na + [-1] * (num_p + num_n - num_pa - num_na)).to(
+                    device
+                )
+            )
+        for _ in background_labels:
+            # pad the background labels
+            _point.append(torch.zeros(num_p + num_n, 3).to(device))  # all 0
+            _point_label.append(torch.zeros(num_p + num_n).to(device) - 1)  # -1 not a point
+    else:
+        _point, _point_label = point_sampler(unique_labels, **point_sampler_kwargs)
+        for _ in background_labels:
+            # pad the background labels
+            _point.append(torch.zeros(len(_point_label[0]), 3).to(device))  # all 0
+            _point_label.append(torch.zeros(len(_point_label[0])).to(device) - 1)  # -1 not a point
+    if len(unique_labels) == 0 and len(background_labels) == 0:
+        # if max_backprompt is 0 and len(unique_labels) is 0, there is no effective prompt and the
+        # iteration must be skipped. Handle this in the trainer.
+        label_prompt, point, point_label, prompt_class = None, None, None, None
+    else:
+        label_prompt = torch.tensor(unique_labels + background_labels).unsqueeze(-1).to(device).long()
+        point = torch.stack(_point)
+        point_label = torch.stack(_point_label)
+        prompt_class = copy.deepcopy(label_prompt)
+        if random.uniform(0, 1) < drop_label_prob and len(unique_labels) > 0:
+            label_prompt = None
+            # If label prompt is dropped, there is no need to pad with points with label -1.
+            pad = len(background_labels)
+            point = point[: len(point) - pad]  # type: ignore
+            point_label = point_label[: len(point_label) - pad]
+            prompt_class = prompt_class[: len(prompt_class) - pad]
+        else:
+            if random.uniform(0, 1) < drop_point_prob:
+                point = None
+                point_label = None
+    return label_prompt, point, point_label, prompt_class
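A usage sketch for the sampler (the label volume and label_set are made up; any returned value may be None because of drop_label_prob/drop_point_prob):

import torch
from monai.apps.vista3d.sampler import sample_prompt_pairs

# hypothetical ground truth: one volume whose voxels carry labels 0, 1 or 2
labels = torch.randint(0, 3, (1, 1, 64, 64, 64))

label_prompt, point, point_label, prompt_class = sample_prompt_pairs(
    labels, label_set=[1, 2], max_point=5
)
# label_prompt/prompt_class: [B, 1], point: [B, N, 3], point_label: [B, N]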
monai/apps/vista3d/transforms.py ADDED
@@ -0,0 +1,224 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import warnings
+from typing import Sequence
+
+import numpy as np
+import torch
+
+from monai.config import DtypeLike, KeysCollection
+from monai.transforms import MapLabelValue
+from monai.transforms.transform import MapTransform
+from monai.transforms.utils import keep_components_with_positive_points
+from monai.utils import look_up_option
+
+__all__ = ["VistaPreTransformd", "VistaPostTransformd", "Relabeld"]
+
+
+def _get_name_to_index_mapping(labels_dict: dict | None) -> dict:
+    """Get the label name to index mapping."""
+    name_to_index_mapping = {}
+    if labels_dict is not None:
+        name_to_index_mapping = {v.lower(): int(k) for k, v in labels_dict.items()}
+    return name_to_index_mapping
+
+
+def _convert_name_to_index(name_to_index_mapping: dict, label_prompt: list | None) -> list | None:
+    """Convert the label names to indices."""
+    if label_prompt is not None and isinstance(label_prompt, list):
+        converted_label_prompt = []
+        # for new classes, add to the mapping
+        for l in label_prompt:
+            if isinstance(l, str) and not l.isdigit():
+                if l.lower() not in name_to_index_mapping:
+                    name_to_index_mapping[l.lower()] = len(name_to_index_mapping)
+        for l in label_prompt:
+            if isinstance(l, (int, str)):
+                converted_label_prompt.append(
+                    name_to_index_mapping.get(l.lower(), int(l) if l.isdigit() else 0) if isinstance(l, str) else int(l)
+                )
+            else:
+                converted_label_prompt.append(l)
+        return converted_label_prompt
+    return label_prompt
+
+
+class VistaPreTransformd(MapTransform):
+    def __init__(
+        self,
+        keys: KeysCollection,
+        allow_missing_keys: bool = False,
+        special_index: Sequence[int] = (25, 26, 27, 28, 29, 117),
+        labels_dict: dict | None = None,
+        subclass: dict | None = None,
+    ) -> None:
+        """
+        Pre-transform for VISTA3D.
+
+        It performs two functions:
+
+        1. If the label prompt indicates that the points belong to a special class (defined by the special
+           index, e.g. tumors, vessels), convert the point labels from 0 (negative), 1 (positive) to the
+           special 2 (negative), 3 (positive).
+
+        2. If the label prompt is within the keys of subclass, convert the label prompt to its subclasses
+           defined by subclass[key]. E.g. a "lung" label is converted to ["left lung", "right lung"].
+
+        The `label_prompt` is a list of B int values, and `point_labels` is a list of length B,
+        where each element is a list of N point labels.
+
+        Args:
+            keys: keys of the corresponding items to be transformed.
+            allow_missing_keys: don't raise exception if key is missing.
+            special_index: the indices that define the special classes.
+            labels_dict: an optional dictionary mapping label indices to label names, used to convert
+                label-name prompts to indices.
+            subclass: a dictionary that maps a label prompt to its subclasses.
+        """
+        super().__init__(keys, allow_missing_keys)
+        self.special_index = special_index
+        self.subclass = subclass
+        self.name_to_index_mapping = _get_name_to_index_mapping(labels_dict)
+
+    def __call__(self, data):
+        label_prompt = data.get("label_prompt", None)
+        point_labels = data.get("point_labels", None)
+        # convert the label names to indices if needed
+        label_prompt = _convert_name_to_index(self.name_to_index_mapping, label_prompt)
+        try:
+            # The evaluator will check the prompt. An invalid prompt will be skipped here and captured by the evaluator.
+            if self.subclass is not None and label_prompt is not None:
+                _label_prompt = []
+                subclass_keys = list(map(int, self.subclass.keys()))
+                for i in range(len(label_prompt)):
+                    if label_prompt[i] in subclass_keys:
+                        _label_prompt.extend(self.subclass[str(label_prompt[i])])
+                    else:
+                        _label_prompt.append(label_prompt[i])
+                data["label_prompt"] = _label_prompt
+            if label_prompt is not None and point_labels is not None:
+                if label_prompt[0] in self.special_index:
+                    point_labels = np.array(point_labels)
+                    point_labels[point_labels == 0] = 2
+                    point_labels[point_labels == 1] = 3
+                    point_labels = point_labels.tolist()
+                data["point_labels"] = point_labels
+        except Exception:
+            # There are specific requirements for `label_prompt` and `point_labels`.
+            # If B > 1 or `label_prompt` is in subclass_keys, `point_labels` must be None.
+            # Those formatting errors should be captured later.
+            warnings.warn("VistaPreTransformd failed to transform label prompt or point labels.")
+
+        return data
+
+
+class VistaPostTransformd(MapTransform):
+    def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
+        """
+        Post-transform for VISTA3D. It converts the model output logits into final segmentation masks.
+        If `label_prompt` is None, the output will be thresholded to sequential indexes [0, 1, 2, ...],
+        otherwise the indexes will be [0, label_prompt[0], label_prompt[1], ...].
+        If `label_prompt` is None while `points` are provided, the model will perform postprocessing to
+        remove regions that do not contain positive points.
+
+        Args:
+            keys: keys of the corresponding items to be transformed.
+            allow_missing_keys: don't raise exception if key is missing.
+
+        """
+        super().__init__(keys, allow_missing_keys)
+
+    def __call__(self, data):
+        """data["label_prompt"] should not contain 0"""
+        for keys in self.keys:
+            if keys in data:
+                pred = data[keys]
+                object_num = pred.shape[0]
+                device = pred.device
+                if data.get("label_prompt", None) is None and data.get("points", None) is not None:
+                    pred = keep_components_with_positive_points(
+                        pred.unsqueeze(0),
+                        point_coords=data.get("points").to(device),
+                        point_labels=data.get("point_labels").to(device),
+                    )[0]
+                pred[pred < 0] = 0.0
+                # if it's multichannel, perform argmax
+                if object_num > 1:
+                    # concatenate the background channel. Make sure the user did not provide 0 as a prompt.
+                    is_bk = torch.all(pred <= 0, dim=0, keepdim=True)
+                    pred = pred.argmax(0).unsqueeze(0).float() + 1.0
+                    pred[is_bk] = 0.0
+                else:
+                    # AsDiscrete will remove NaN
+                    # pred = monai.transforms.AsDiscrete(threshold=0.5)(pred)
+                    pred[pred > 0] = 1.0
+                if "label_prompt" in data and data["label_prompt"] is not None:
+                    pred += 0.5  # inplace mapping to avoid cloning pred
+                    label_prompt = data["label_prompt"].to(device)  # ensure label_prompt is on the same device
+                    for i in range(1, object_num + 1):
+                        frac = i + 0.5
+                        pred[pred == frac] = label_prompt[i - 1].to(pred.dtype)
+                    pred[pred == 0.5] = 0.0
+                data[keys] = pred
+        return data
+
+
+class Relabeld(MapTransform):
+    def __init__(
+        self,
+        keys: KeysCollection,
+        label_mappings: dict[str, list[tuple[int, int]]],
+        dtype: DtypeLike = np.int16,
+        dataset_key: str = "dataset_name",
+        allow_missing_keys: bool = False,
+    ) -> None:
+        """
+        Remap the voxel labels in the input data dictionary based on the specified mapping.
+
+        This list of local -> global label mappings will be applied to each input `data[keys]`.
+        If `data[dataset_key]` is not in `label_mappings`, `label_mappings['default']` will be used.
+        If `label_mappings[data[dataset_key]]` is None, no relabeling will be performed.
+
+        Args:
+            keys: keys of the corresponding items to be transformed.
+            label_mappings: a dictionary specifying how local dataset class indices are mapped to the
+                global class indices. The dictionary keys are dataset names and the values are lists of
+                (local label, global label) pairs. This list of local -> global label mappings will be
+                applied to each input `data[keys]`. If `data[dataset_key]` is not in `label_mappings`,
+                `label_mappings['default']` will be used. If `label_mappings[data[dataset_key]]` is None,
+                no relabeling will be performed. Please set `label_mappings={}` to completely skip this transform.
+            dtype: convert the output data to dtype, default to int16.
+            dataset_key: key to get the dataset name from the data dictionary, default to "dataset_name".
+            allow_missing_keys: don't raise exception if key is missing.
+
+        """
+        super().__init__(keys, allow_missing_keys)
+        self.mappers = {}
+        self.dataset_key = dataset_key
+        for name, mapping in label_mappings.items():
+            self.mappers[name] = MapLabelValue(
+                orig_labels=[int(pair[0]) for pair in mapping],
+                target_labels=[int(pair[1]) for pair in mapping],
+                dtype=dtype,
+            )
+
+    def __call__(self, data):
+        d = dict(data)
+        dataset_name = d.get(self.dataset_key, "default")
+        _m = look_up_option(dataset_name, self.mappers, default=None)
+        if _m is None:
+            return d
+        for key in self.key_iterator(d):
+            d[key] = _m(d[key])
+        return d
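A usage sketch for Relabeld (the mapping and sample data are hypothetical):

import numpy as np
from monai.apps.vista3d.transforms import Relabeld

# hypothetical mapping: local labels 1/2 of "datasetA" become global labels 10/20
relabel = Relabeld(keys="label", label_mappings={"datasetA": [(1, 10), (2, 20)]})
sample = {"label": np.array([[0, 1], [2, 1]]), "dataset_name": "datasetA"}
out = relabel(sample)
print(out["label"])  # [[0, 10], [20, 10]]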
monai/bundle/config_parser.py CHANGED
@@ -20,7 +20,7 @@ from typing import TYPE_CHECKING, Any
 
 from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
 from monai.bundle.reference_resolver import ReferenceResolver
-from monai.bundle.utils import ID_REF_KEY, ID_SEP_KEY, MACRO_KEY
+from monai.bundle.utils import ID_REF_KEY, ID_SEP_KEY, MACRO_KEY, merge_kv
 from monai.config import PathLike
 from monai.utils import ensure_tuple, look_up_option, optional_import
 from monai.utils.misc import CheckKeyDuplicatesYamlLoader, check_key_duplicates
@@ -423,8 +423,10 @@ class ConfigParser:
         if isinstance(files, str) and not Path(files).is_file() and "," in files:
             files = files.split(",")
         for i in ensure_tuple(files):
-            for k, v in (cls.load_config_file(i, **kwargs)).items():
-                parser[k] = v
+            config_dict = cls.load_config_file(i, **kwargs)
+            for k, v in config_dict.items():
+                merge_kv(parser, k, v)
+
         return parser.get()  # type: ignore
 
     @classmethod
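The change routes every key/value from each loaded file through the new merge_kv helper (added to monai/bundle/utils.py in this release) instead of a plain `parser[k] = v` assignment, so keys can be merged into existing entries rather than always overwriting them. A sketch of the call site (file names and contents are hypothetical):

from pathlib import Path
from monai.bundle import ConfigParser

# two hypothetical config files loaded in order; each key flows through merge_kv
Path("base.yaml").write_text("net: {spatial_dims: 3}\n")
Path("override.yaml").write_text("lr: 0.001\n")
config = ConfigParser.load_config_files(["base.yaml", "override.yaml"])
print(config)  # {'net': {'spatial_dims': 3}, 'lr': 0.001}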