konfai 1.2.6-py3-none-any.whl → 1.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of konfai has been flagged as potentially problematic.
- konfai/data/patching.py +2 -2
- konfai/data/transform.py +14 -13
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/METADATA +1 -1
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/RECORD +8 -8
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/WHEEL +0 -0
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/entry_points.txt +0 -0
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/licenses/LICENSE +0 -0
- {konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/top_level.txt +0 -0
konfai/data/patching.py
CHANGED
@@ -219,7 +219,7 @@ class Patch(ABC):
             )
             slices = [s] + list(self._patch_slices[a][index][1:])
             data_sliced = data[slices_pre + slices]
-            if data_sliced.shape[len(slices_pre)] < bottom + top + 1:
+            if extend_slice > 0 and data_sliced.shape[len(slices_pre)] < bottom + top + 1:
                 pad_bottom = 0
                 pad_top = 0
                 if self._patch_slices[a][index][0].start - bottom < 0:
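In 1.2.6 a shorter-than-expected slice was padded unconditionally; in 1.2.8 padding only happens when slice extension was actually requested. A minimal standalone sketch of the new condition (the values of extend_slice, bottom and top, and the torch.nn.functional.pad call are illustrative assumptions, not konfai's own padding code):

import torch

# Hypothetical values: a window of bottom + top + 1 = 5 slices is wanted along the
# first spatial axis, but only 3 slices could be read near the volume border.
extend_slice, bottom, top = 1, 2, 2
data_sliced = torch.zeros(3, 16, 16)

# 1.2.8 behaviour: pad only if slice extension was requested (extend_slice > 0).
if extend_slice > 0 and data_sliced.shape[0] < bottom + top + 1:
    missing = bottom + top + 1 - data_sliced.shape[0]
    data_sliced = torch.nn.functional.pad(data_sliced, (0, 0, 0, 0, 0, missing))

print(data_sliced.shape)  # torch.Size([5, 16, 16]); with extend_slice == 0 it would stay (3, 16, 16)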
@@ -335,7 +335,7 @@ class DatasetManager:
         self.data: list[torch.Tensor] = []

         for transform_function in transforms:
-            _shape = transform_function.transform_shape(_shape, cache_attribute)
+            _shape = transform_function.transform_shape(self.name, _shape, cache_attribute)

         self.patch = (
             DatasetPatch(
konfai/data/transform.py
CHANGED
@@ -21,7 +21,7 @@ class Transform(NeedDevice, ABC):
     def set_datasets(self, datasets: list[Dataset]):
         self.datasets = datasets

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         return shape

     @abstractmethod
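Every transform_shape override in the package gains a leading name argument in 1.2.8, matching the self.name that DatasetManager now passes at the call site in patching.py, so any user-defined transform that overrides transform_shape needs the same update. A standalone mock of an updated override (CropDepth, the halving logic and the dict-typed cache are made-up stand-ins; only the signature comes from this diff):

# 1.2.6-style override (no longer matches the base class):
#     def transform_shape(self, shape: list[int], cache_attribute) -> list[int]: ...

# 1.2.8-style override:
class CropDepth:
    def transform_shape(self, name: str, shape: list[int], cache_attribute: dict) -> list[int]:
        # `name` identifies the dataset entry being processed; this toy example ignores it
        # and simply halves the depth axis.
        return [shape[0] // 2] + list(shape[1:])

print(CropDepth().transform_shape("train/image", [64, 128, 128], {}))  # [32, 128, 128]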
@@ -300,7 +300,7 @@ class Padding(TransformInverse):
         ).squeeze(0)
         return result

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         for dim in range(len(self.padding) // 2):
             shape[-dim - 1] += sum(self.padding[dim * 2 : dim * 2 + 2])
         return shape
@@ -352,7 +352,7 @@ class Resample(TransformInverse, ABC):
         pass

     @abstractmethod
-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         pass

     def inverse(self, name: str, tensor: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
@@ -368,7 +368,7 @@ class ResampleToResolution(Resample):
         super().__init__(inverse)
         self.spacing = torch.tensor([0 if s < 0 else s for s in spacing])

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         if "Spacing" not in cache_attribute:
             TransformError(
                 "Missing 'Spacing' in cache attributes, the data is likely not a valid image.",
@@ -376,36 +376,37 @@ class ResampleToResolution(Resample):
             )
         if len(shape) != len(self.spacing):
             TransformError("Shape and spacing dimensions do not match: shape={shape}, spacing={self.spacing}")
-        image_spacing = cache_attribute.get_tensor("Spacing")
+        image_spacing = cache_attribute.get_tensor("Spacing")
         spacing = self.spacing

         for i, s in enumerate(self.spacing):
             if s == 0:
                 spacing[i] = image_spacing[i]
-        resize_factor = spacing /
+        resize_factor = spacing / image_spacing
         return [int(x) for x in (torch.tensor(shape) * 1 / resize_factor)]

     def __call__(self, name: str, tensor: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
-        image_spacing = cache_attribute.get_tensor("Spacing")
+        image_spacing = cache_attribute.get_tensor("Spacing")
         spacing = self.spacing
         for i, s in enumerate(self.spacing):
             if s == 0:
                 spacing[i] = image_spacing[i]
-        resize_factor = spacing / cache_attribute.get_tensor("Spacing")
-        cache_attribute["Spacing"] = spacing
+        resize_factor = spacing / cache_attribute.get_tensor("Spacing")
+        cache_attribute["Spacing"] = spacing
         cache_attribute["Size"] = np.asarray([int(x) for x in torch.tensor(tensor.shape[1:])])
         size = [int(x) for x in (torch.tensor(tensor.shape[1:]) * 1 / resize_factor)]
         cache_attribute["Size"] = np.asarray(size)
         return self._resample(tensor, size)


+
 class ResampleToShape(Resample):

     def __init__(self, shape: list[float] = [100, 256, 256], inverse: bool = True) -> None:
         super().__init__(inverse)
         self.shape = torch.tensor([0 if s < 0 else s for s in shape])

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         if "Spacing" not in cache_attribute:
             TransformError(
                 "Missing 'Spacing' in cache attributes, the data is likely not a valid image.",
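The corrected transform_shape now uses the same arithmetic as __call__: resize_factor = spacing / image_spacing, and the predicted shape is the input shape divided by that factor. A small worked example with made-up values:

import torch

# A 100 x 100 x 100 volume stored at 2 mm isotropic spacing, resampled to a 1 mm target.
shape = [100, 100, 100]
image_spacing = torch.tensor([2.0, 2.0, 2.0])  # what cache_attribute.get_tensor("Spacing") would return
spacing = torch.tensor([1.0, 1.0, 1.0])        # target resolution

resize_factor = spacing / image_spacing        # tensor([0.5000, 0.5000, 0.5000])
print([int(x) for x in (torch.tensor(shape) * 1 / resize_factor)])  # [200, 200, 200]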
@@ -441,7 +442,7 @@ class ResampleTransform(TransformInverse):
         super().__init__(inverse)
         self.transforms = transforms

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         return shape

     def __call__(self, name: str, tensor: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
@@ -611,7 +612,7 @@ class Flatten(Transform):
     def __init__(self) -> None:
         super().__init__()

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         return [np.prod(np.asarray(shape))]

     def __call__(self, name: str, tensor: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
@@ -624,7 +625,7 @@ class Permute(TransformInverse):
         super().__init__(inverse)
         self.dims = [0] + [int(d) + 1 for d in dims.split("|")]

-    def transform_shape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
+    def transform_shape(self, name: str, shape: list[int], cache_attribute: Attribute) -> list[int]:
         return [shape[it - 1] for it in self.dims[1:]]

     def __call__(self, name: str, tensor: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
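The Flatten and Permute shape formulas above are easy to check in isolation. A short sketch with a made-up input shape and a made-up "2|0|1" dims string (only the two formulas themselves come from the diff):

import numpy as np

shape = [64, 128, 192]  # made-up spatial shape, channel axis excluded as in transform_shape

# Flatten.transform_shape: the whole volume collapses to a single axis.
print([np.prod(np.asarray(shape))])        # [1572864]

# Permute("2|0|1").transform_shape: dims becomes [0, 3, 1, 2], so the spatial axes
# are reordered as [shape[2], shape[0], shape[1]].
dims = [0] + [int(d) + 1 for d in "2|0|1".split("|")]
print([shape[it - 1] for it in dims[1:]])  # [192, 64, 128]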
{konfai-1.2.6.dist-info → konfai-1.2.8.dist-info}/RECORD
CHANGED
@@ -6,8 +6,8 @@ konfai/trainer.py,sha256=g_TkPDUjToFGDGB7aaRZMn-fQllHV_I2GHFKUzDGF8o,27106
 konfai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/data/augmentation.py,sha256=7jrWcpw61t3cCIdHtUlnekRO7JwXIM5Q6RIXY8Ya-xM,27796
 konfai/data/data_manager.py,sha256=tZ2DZHDW4UySCCzwEzR2WIL0fTp7lqAfqEbNPiEw5NE,31064
-konfai/data/patching.py,sha256=
-konfai/data/transform.py,sha256=
+konfai/data/patching.py,sha256=P0TcjR4qcUWpB_Uph0-dd8bMeNVJC_IGNK_jkxStglQ,16526
+konfai/data/transform.py,sha256=DpD5bNAKji_mly2IzNb1HrCXkFMYLUVj9drjTOZHk_M,30343
 konfai/metric/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/metric/measure.py,sha256=0mOIZKTa2u0UECpoDSbdJUhttAw_e1BlsROQQpi1oBk,27804
 konfai/metric/schedulers.py,sha256=TpYMA24FMpxRnqfhMGb0i_Mm-bzT9kySbBgvkYk-6wM,1327
@@ -30,9 +30,9 @@ konfai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/utils/config.py,sha256=a7t44CYMUT5oCDdjL94IswhCVfFbQ5FCgDWZktDDkc4,14347
 konfai/utils/dataset.py,sha256=Au22fcADKyDJMfS8Z9q8kEXLtKkoufJsH7Pwly6pALo,28288
 konfai/utils/utils.py,sha256=jCj3tZ8agQYceSY_tlVYp88UFPE5oUn6tXrqnZGrKiI,28410
-konfai-1.2.
-konfai-1.2.
-konfai-1.2.
-konfai-1.2.
-konfai-1.2.
-konfai-1.2.
+konfai-1.2.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+konfai-1.2.8.dist-info/METADATA,sha256=CKKm-QfF05P8k86_9AbNwya6QrQX0qAitMVUIlNStwc,2475
+konfai-1.2.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+konfai-1.2.8.dist-info/entry_points.txt,sha256=fG82HRN5-g39ACSOCtij_I3N6EHxfYnMR0D7TI_8pW8,81
+konfai-1.2.8.dist-info/top_level.txt,sha256=xF470dkIlFoFqTZEOlRehKJr4WU_8OKGXrJqYm9vWKs,7
+konfai-1.2.8.dist-info/RECORD,,
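The refreshed RECORD entries use the standard wheel layout: path, sha256= followed by an unpadded urlsafe-base64 digest, then the file size in bytes. A sketch of how one of the new entries could be checked from inside an unpacked konfai 1.2.8 wheel (the relative path is assumed to resolve from the current working directory):

import base64
import hashlib

# One of the updated RECORD lines from this diff.
record_line = "konfai/data/patching.py,sha256=P0TcjR4qcUWpB_Uph0-dd8bMeNVJC_IGNK_jkxStglQ,16526"
path, hash_field, size = record_line.split(",")

with open(path, "rb") as f:
    data = f.read()

# RECORD stores sha256 digests as urlsafe base64 with the trailing '=' padding stripped.
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
assert digest == hash_field.removeprefix("sha256=")
assert len(data) == int(size)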