konfai-1.1.5-py3-none-any.whl → konfai-1.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of konfai has been flagged as possibly problematic.

konfai/data/augmentation.py CHANGED
@@ -57,7 +57,7 @@ class DataAugmentationsList():
 
     def load(self, key: str, datasets: list[Dataset]):
        for augmentation, prob in self.dataAugmentationsLoader.items():
-            module, name = _getModule(augmentation, "data.augmentation")
+            module, name = _getModule(augmentation, "konfai.data.augmentation")
            dataAugmentation: DataAugmentation = config("{}.Dataset.augmentations.{}.dataAugmentations.{}".format(KONFAI_ROOT(), key, augmentation))(getattr(importlib.import_module(module), name))(config = None)
            dataAugmentation.load(prob.prob)
            dataAugmentation.setDatasets(datasets)
konfai/data/data_manager.py CHANGED
@@ -184,7 +184,7 @@ class DatasetIter(data.Dataset):
        data = {}
        x, a, p = self.map[index]
        if x not in self._index_cache:
-            if x not in self._index_cache and len(self._index_cache) >= self.buffer_size and not self.use_cache:
+            if len(self._index_cache) >= self.buffer_size and not self.use_cache:
                self._unloadData(self._index_cache[0])
            self._loadData(x)
 
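Note: the 1.1.6 change above only removes a re-test that the enclosing `if x not in self._index_cache` already guarantees; eviction behaviour is unchanged. A minimal standalone sketch of the resulting bounded-buffer logic (hypothetical `BoundedBuffer`; plain list operations stand in for `_loadData`/`_unloadData`):

    class BoundedBuffer:
        def __init__(self, buffer_size: int, use_cache: bool):
            self.buffer_size = buffer_size
            self.use_cache = use_cache
            self._index_cache: list[int] = []

        def touch(self, x: int):
            if x not in self._index_cache:
                # 1.1.5 re-tested `x not in self._index_cache` here, which the
                # outer branch already guarantees; 1.1.6 drops the dead test.
                if len(self._index_cache) >= self.buffer_size and not self.use_cache:
                    self._index_cache.pop(0)  # evict the oldest entry (FIFO)
                self._index_cache.append(x)   # stands in for self._loadData(x)
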
konfai/data/transform.py CHANGED
@@ -36,7 +36,7 @@ class TransformLoader:
        pass
 
    def getTransform(self, classpath : str, DL_args : str) -> Transform:
-        module, name = _getModule(classpath, "data.transform")
+        module, name = _getModule(classpath, "konfai.data.transform")
        return config("{}.{}".format(DL_args, classpath))(getattr(importlib.import_module(module), name))(config = None)
 
 class Clip(Transform):
@@ -213,7 +213,7 @@ class Resample(Transform, ABC):
 
 class ResampleToResolution(Resample):
 
-    def __init__(self, spacing : list[Union[float, None]] = [1., 1., 1.]) -> None:
+    def __init__(self, spacing : list[float] = [1., 1., 1.]) -> None:
        self.spacing = torch.tensor([0 if s < 0 else s for s in spacing])
 
    def transformShape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
@@ -244,34 +244,35 @@ class ResampleToResolution(Resample):
        cache_attribute["Size"] = np.asarray(size)
        return self._resample(input, size)
 
-class ResampleToSize(Resample):
+class ResampleToShape(Resample):
 
-    def __init__(self, size : list[int] = [100,512,512]) -> None:
-        self.size = size
+    def __init__(self, shape : list[float] = [100,256,256]) -> None:
+        self.shape = torch.tensor([0 if s < 0 else s for s in shape])
 
    def transformShape(self, shape: list[int], cache_attribute: Attribute) -> list[int]:
        if "Spacing" not in cache_attribute:
            TransformError("Missing 'Spacing' in cache attributes, the data is likely not a valid image.",
                "Make sure your input is a image (e.g., .nii, .mha) with proper metadata.")
-        if len(shape) != len(self.size):
+        if len(shape) != len(self.shape):
            TransformError("Shape and spacing dimensions do not match: shape={shape}, spacing={self.spacing}")
-        size = self.size
-        for i, s in enumerate(self.size):
-            if s == -1:
-                size[i] = shape[i]
-        return size
+        new_shape = self.shape
+        for i, s in enumerate(self.shape):
+            if s == 0:
+                new_shape[i] = shape[i]
+        print(new_shape)
+        return new_shape
 
    def __call__(self, name: str, input: torch.Tensor, cache_attribute: Attribute) -> torch.Tensor:
-        size = self.size
-        image_size = np.asarray([int(x) for x in torch.tensor(input.shape[1:])])
-        for i, s in enumerate(self.size):
-            if s is None:
-                size[i] = image_size[i]
+        shape = self.shape
+        image_shape = torch.tensor([int(x) for x in torch.tensor(input.shape[1:])])
+        for i, s in enumerate(self.shape):
+            if s == 0:
+                shape[i] = image_shape[i]
        if "Spacing" in cache_attribute:
-            cache_attribute["Spacing"] = torch.flip(torch.tensor(image_size)/torch.tensor(size)*torch.flip(cache_attribute.get_tensor("Spacing"), dims=[0]), dims=[0])
-        cache_attribute["Size"] = image_size
-        cache_attribute["Size"] = size
-        return self._resample(input, size)
+            cache_attribute["Spacing"] = torch.flip(image_shape/shape*torch.flip(cache_attribute.get_tensor("Spacing"), dims=[0]), dims=[0])
+        cache_attribute["Size"] = image_shape
+        cache_attribute["Size"] = shape
+        return self._resample(input, shape)
 
 
 class ResampleTransform(Transform):
 
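The renamed `ResampleToShape` also changes the sentinel convention: a 0 entry in `shape` now means "keep the input's size along that axis", where 1.1.5 mixed -1 and None for the same purpose. A small hedged illustration of that convention (values are made up):

    import torch

    target = torch.tensor([100, 0, 0])           # force 100 slices, keep H and W
    image_shape = torch.tensor([137, 256, 256])  # hypothetical input volume
    print(torch.where(target == 0, image_shape, target))  # tensor([100, 256, 256])
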
konfai/evaluator.py CHANGED
@@ -27,7 +27,7 @@ class CriterionsLoader():
    def getCriterions(self, output_group : str, target_group : str) -> dict[torch.nn.Module, CriterionsAttr]:
        criterions = {}
        for module_classpath, criterionsAttr in self.criterionsLoader.items():
-            module, name = _getModule(module_classpath, "metric.measure")
+            module, name = _getModule(module_classpath, "konfai.metric.measure")
            criterions[config("{}.metrics.{}.targetsCriterions.{}.criterionsLoader.{}".format(KONFAI_ROOT(), output_group, target_group, module_classpath))(getattr(importlib.import_module(module), name))(config = None)] = criterionsAttr
        return criterions
 
konfai/main.py CHANGED
@@ -9,8 +9,6 @@ import sys
 sys.path.insert(0, os.getcwd())
 
 def main():
-    import tracemalloc
-
    parser = argparse.ArgumentParser(description="KonfAI", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    try:
        with setup(parser) as distributedObject:
konfai/metric/measure.py CHANGED
@@ -250,7 +250,7 @@ class PerceptualLoss(Criterion):
    def getLoss(self) -> dict[torch.nn.Module, float]:
        result: dict[torch.nn.Module, float] = {}
        for loss, l in self.losses.items():
-            module, name = _getModule(loss, "metric.measure")
+            module, name = _getModule(loss, "konfai.metric.measure")
            result[config(self.DL_args)(getattr(importlib.import_module(module), name))(config=None)] = l
        return result
 
@@ -458,13 +458,11 @@ class FID(Criterion):
        return features
 
    def calculate_fid(real_features: np.ndarray, generated_features: np.ndarray) -> float:
-        # Calculate mean and covariance statistics
        mu1 = np.mean(real_features, axis=0)
        sigma1 = np.cov(real_features, rowvar=False)
        mu2 = np.mean(generated_features, axis=0)
        sigma2 = np.cov(generated_features, rowvar=False)
 
-        # Calculate FID score
        diff = mu1 - mu2
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if np.iscomplexobj(covmean):
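The 1.1.6 change here only drops two inline comments; the computation is unchanged. For context, these statistics feed the standard Fréchet Inception Distance. A minimal sketch of the full arithmetic (the textbook formula; the body after the `iscomplexobj` check is assumed to match it):

    import numpy as np
    from scipy import linalg

    def frechet_distance(mu1, sigma1, mu2, sigma2) -> float:
        # FID = ||mu1 - mu2||^2 + Tr(s1 + s2 - 2 * sqrtm(s1 @ s2))
        diff = mu1 - mu2
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if np.iscomplexobj(covmean):
            covmean = covmean.real  # drop tiny imaginary parts from sqrtm
        return float(diff.dot(diff) + np.trace(sigma1 + sigma2 - 2 * covmean))
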
@@ -480,7 +478,6 @@ class FID(Criterion):
        generated_features = FID.get_features(generated_images, self.inception_model)
 
        return FID.calculate_fid(real_features, generated_features)
-
 
 class MutualInformationLoss(torch.nn.Module):
    def __init__(self, num_bins: int = 23, sigma_ratio: float = 0.5, smooth_nr: float = 1e-7, smooth_dr: float = 1e-7) -> None:
@@ -498,7 +495,6 @@ class MutualInformationLoss(torch.nn.Module):
        target_weight, target_probability = self.parzen_windowing_gaussian(target)
        return pred_weight, pred_probability, target_weight, target_probability
 
-
    def parzen_windowing_gaussian(self, img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        img = torch.clamp(img, 0, 1)
        img = img.reshape(img.shape[0], -1, 1) # (batch, num_sample, 1)
konfai/metric/schedulers.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import numpy as np
 from abc import abstractmethod
 from konfai.utils.config import config
+from functools import partial
 
 class Scheduler():
 
@@ -46,4 +47,12 @@ class CosineAnnealing(Scheduler):
        self.T_max = T_max
 
    def get_value(self):
-        return self.eta_min + (self.baseValue - self.eta_min) *(1 + np.cos(self.it * torch.pi / self.T_max)) / 2
+        return self.eta_min + (self.baseValue - self.eta_min) *(1 + np.cos(self.it * torch.pi / self.T_max)) / 2
+
+class Warmup(torch.optim.lr_scheduler.LambdaLR):
+
+    def warmup(warmup_steps: int, step: int) -> float:
+        return min(1.0, step / warmup_steps)
+
+    def __init__(self, optimizer: torch.optim.Optimizer, warmup_steps: int = 10, last_epoch=-1, verbose="deprecated"):
+        super().__init__(optimizer, partial(Warmup.warmup, warmup_steps), last_epoch, verbose)
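The new `Warmup` class is a thin wrapper over `torch.optim.lr_scheduler.LambdaLR`: the base learning rate is scaled by min(1, step / warmup_steps), ramping linearly from 0 and holding at the base value afterwards. A hedged usage sketch with a toy model (not taken from konfai's docs):

    import torch

    model = torch.nn.Linear(4, 2)  # toy stand-in
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda step: min(1.0, step / 10))
    for _ in range(3):
        opt.step()
        sched.step()
    print(opt.param_groups[0]["lr"])  # 0.1 * 3/10 = 0.03, three steps into warmup
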
konfai/models/segmentation/NestedUNet.py CHANGED
@@ -3,23 +3,23 @@ from typing import Union
 import torch
 from konfai.data.patching import ModelPatch
 
-
-class NestedUNetBlock(network.ModuleArgsDict):
+class NestedUNet(network.Network):
 
-    def __init__(self, channels: list[int], nb_conv_per_stage: int, blockConfig: blocks.BlockConfig, downSampleMode: blocks.DownSampleMode, upSampleMode: blocks.UpSampleMode, attention : bool, block: type, dim: int, i : int = 0) -> None:
-        super().__init__()
-        if i > 0:
-            self.add_module(downSampleMode.name, blocks.downSample(in_channels=channels[0], out_channels=channels[1], downSampleMode=downSampleMode, dim=dim))
-
-        self.add_module("X_{}_{}".format(i, 0), block(in_channels=channels[1 if downSampleMode == blocks.DownSampleMode.CONV_STRIDE and i > 0 else 0], out_channels=channels[1], blockConfigs=[blockConfig]*nb_conv_per_stage, dim=dim), out_branch=["X_{}_{}".format(i, 0)])
-        if len(channels) > 2:
-            self.add_module("UNetBlock_{}".format(i+1), NestedUNetBlock(channels[1:], nb_conv_per_stage, blockConfig, downSampleMode, upSampleMode, attention, block, dim, i+1), in_branch=["X_{}_{}".format(i, 0)], out_branch=["X_{}_{}".format(i+1, j) for j in range(len(channels)-2)])
-            for j in range(len(channels)-2):
-                self.add_module("X_{}_{}_{}".format(i, j+1, upSampleMode.name), blocks.upSample(in_channels=channels[2], out_channels=channels[1], upSampleMode=upSampleMode, dim=dim), in_branch=["X_{}_{}".format(i+1, j)], out_branch=["X_{}_{}".format(i+1, j)])
-                self.add_module("SkipConnection_{}_{}".format(i, j+1), blocks.Concat(), in_branch=["X_{}_{}".format(i+1, j)]+["X_{}_{}".format(i, r) for r in range(j+1)], out_branch=["X_{}_{}".format(i, j+1)])
-                self.add_module("X_{}_{}".format(i, j+1), block(in_channels=(channels[1]*(j+1)+channels[2]) if upSampleMode != blocks.UpSampleMode.CONV_TRANSPOSE else channels[1]*(j+2), out_channels=channels[1], blockConfigs=[blockConfig]*nb_conv_per_stage, dim=dim), in_branch=["X_{}_{}".format(i, j+1)], out_branch=["X_{}_{}".format(i, j+1)])
+    class NestedUNetBlock(network.ModuleArgsDict):
+
+        def __init__(self, channels: list[int], nb_conv_per_stage: int, blockConfig: blocks.BlockConfig, downSampleMode: blocks.DownSampleMode, upSampleMode: blocks.UpSampleMode, attention : bool, block: type, dim: int, i : int = 0) -> None:
+            super().__init__()
+            if i > 0:
+                self.add_module(downSampleMode.name, blocks.downSample(in_channels=channels[0], out_channels=channels[1], downSampleMode=downSampleMode, dim=dim))
+
+            self.add_module("X_{}_{}".format(i, 0), block(in_channels=channels[1 if downSampleMode == blocks.DownSampleMode.CONV_STRIDE and i > 0 else 0], out_channels=channels[1], blockConfigs=[blockConfig]*nb_conv_per_stage, dim=dim), out_branch=["X_{}_{}".format(i, 0)])
+            if len(channels) > 2:
+                self.add_module("UNetBlock_{}".format(i+1), NestedUNet.NestedUNetBlock(channels[1:], nb_conv_per_stage, blockConfig, downSampleMode, upSampleMode, attention, block, dim, i+1), in_branch=["X_{}_{}".format(i, 0)], out_branch=["X_{}_{}".format(i+1, j) for j in range(len(channels)-2)])
+                for j in range(len(channels)-2):
+                    self.add_module("X_{}_{}_{}".format(i, j+1, upSampleMode.name), blocks.upSample(in_channels=channels[2], out_channels=channels[1], upSampleMode=upSampleMode, dim=dim), in_branch=["X_{}_{}".format(i+1, j)], out_branch=["X_{}_{}".format(i+1, j)])
+                    self.add_module("SkipConnection_{}_{}".format(i, j+1), blocks.Concat(), in_branch=["X_{}_{}".format(i+1, j)]+["X_{}_{}".format(i, r) for r in range(j+1)], out_branch=["X_{}_{}".format(i, j+1)])
+                    self.add_module("X_{}_{}".format(i, j+1), block(in_channels=(channels[1]*(j+1)+channels[2]) if upSampleMode != blocks.UpSampleMode.CONV_TRANSPOSE else channels[1]*(j+2), out_channels=channels[1], blockConfigs=[blockConfig]*nb_conv_per_stage, dim=dim), in_branch=["X_{}_{}".format(i, j+1)], out_branch=["X_{}_{}".format(i, j+1)])
 
-class NestedUNet(network.Network):
 
    class NestedUNetHead(network.ModuleArgsDict):
 
@@ -49,58 +49,55 @@ class NestedUNet(network.Network):
                  activation: str = "Softmax") -> None:
        super().__init__(in_channels = channels[0], optimizer = optimizer, schedulers = schedulers, outputsCriterions = outputsCriterions, patch=patch, dim = dim)
 
-        self.add_module("UNetBlock_0", NestedUNetBlock(channels, nb_conv_per_stage, blockConfig, downSampleMode=blocks.DownSampleMode._member_map_[downSampleMode], upSampleMode=blocks.UpSampleMode._member_map_[upSampleMode], attention=attention, block = blocks.ConvBlock if blockType == "Conv" else blocks.ResBlock, dim=dim), out_branch=["X_0_{}".format(j+1) for j in range(len(channels)-2)])
+        self.add_module("UNetBlock_0", NestedUNet.NestedUNetBlock(channels, nb_conv_per_stage, blockConfig, downSampleMode=blocks.DownSampleMode._member_map_[downSampleMode], upSampleMode=blocks.UpSampleMode._member_map_[upSampleMode], attention=attention, block = blocks.ConvBlock if blockType == "Conv" else blocks.ResBlock, dim=dim), out_branch=["X_0_{}".format(j+1) for j in range(len(channels)-2)])
        for j in range(len(channels)-2):
            self.add_module("Head_{}".format(j), NestedUNet.NestedUNetHead(in_channels=channels[1], nb_class=nb_class, activation=activation, dim=dim), in_branch=["X_0_{}".format(j+1)], out_branch=[-1])
 
 
+class UNetpp(network.Network):
 
-
-class ResNetEncoderLayer(network.ModuleArgsDict):
-
-    def __init__(self, in_channel: int, out_channel: int, nb_block: int, dim: int, downSampleMode : blocks.DownSampleMode):
-        super().__init__()
-        for i in range(nb_block):
-            if downSampleMode == blocks.DownSampleMode.MAXPOOL and i == 0:
-                self.add_module("DownSample", blocks.getTorchModule("MaxPool", dim)(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False))
-            self.add_module("ResBlock_{}".format(i), blocks.ResBlock(in_channel, out_channel, [blocks.BlockConfig(3, 2 if downSampleMode == blocks.DownSampleMode.CONV_STRIDE and i == 0 else 1, 1, False, "ReLU;True", blocks.NormMode.BATCH), blocks.BlockConfig(3, 1, 1, False, None, blocks.NormMode.BATCH)], dim=dim))
-            in_channel = out_channel
-
-def resNetEncoder(channels: list[int], layers: list[int], dim: int) -> list[torch.nn.Module]:
-    modules = []
-    modules.append(blocks.ConvBlock(channels[0], channels[1], [blocks.BlockConfig(7, 2, 3, False, "ReLU", blocks.NormMode.BATCH)], dim=dim))
-    for i, (in_channel, out_channel, layer) in enumerate(zip(channels[1:], channels[2:], layers)):
-        modules.append(ResNetEncoderLayer(in_channel, out_channel, layer, dim, blocks.DownSampleMode.MAXPOOL if i==0 else blocks.DownSampleMode.CONV_STRIDE))
-    return modules
-
-class NestedUNetBlock(network.ModuleArgsDict):
+    class ResNetEncoderLayer(network.ModuleArgsDict):
 
-    def __init__(self, encoder_channels: list[int], decoder_channels: list[int], encoders: list[torch.nn.Module], upSampleMode: blocks.UpSampleMode, dim: int, i : int = 0) -> None:
-        super().__init__()
-        self.add_module("X_{}_{}".format(i, 0), encoders[0], out_branch=["X_{}_{}".format(i, 0)])
-        if len(encoder_channels) > 2:
-            self.add_module("UNetBlock_{}".format(i+1), NestedUNetBlock(encoder_channels[1:], decoder_channels[1:], encoders[1:], upSampleMode, dim, i+1), in_branch=["X_{}_{}".format(i, 0)], out_branch=["X_{}_{}".format(i+1, j) for j in range(len(encoder_channels)-2)])
-            for j in range(len(encoder_channels)-2):
-                self.add_module("X_{}_{}_{}".format(i, j+1, upSampleMode.name), blocks.upSample(in_channels=encoder_channels[2], out_channels=decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1], upSampleMode=upSampleMode, dim=dim), in_branch=["X_{}_{}".format(i+1, j)], out_branch=["X_{}_{}".format(i+1, j)])
-                self.add_module("SkipConnection_{}_{}".format(i, j+1), blocks.Concat(), in_branch=["X_{}_{}".format(i+1, j)]+["X_{}_{}".format(i, r) for r in range(j+1)], out_branch=["X_{}_{}".format(i, j+1)])
-                self.add_module("X_{}_{}".format(i, j+1), blocks.ConvBlock(in_channels=encoder_channels[1]*j+((encoder_channels[1]+encoder_channels[2]) if upSampleMode != blocks.UpSampleMode.CONV_TRANSPOSE else (decoder_channels[1]+decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1]+encoder_channels[1])), out_channels=decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1], blockConfigs=[blocks.BlockConfig(3,1,1,False, "ReLU;True", blocks.NormMode.BATCH)]*2, dim=dim), in_branch=["X_{}_{}".format(i, j+1)], out_branch=["X_{}_{}".format(i, j+1)])
+        def __init__(self, in_channel: int, out_channel: int, nb_block: int, dim: int, downSampleMode : blocks.DownSampleMode):
+            super().__init__()
+            for i in range(nb_block):
+                if downSampleMode == blocks.DownSampleMode.MAXPOOL and i == 0:
+                    self.add_module("DownSample", blocks.getTorchModule("MaxPool", dim)(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False))
+                self.add_module("ResBlock_{}".format(i), blocks.ResBlock(in_channel, out_channel, [blocks.BlockConfig(3, 2 if downSampleMode == blocks.DownSampleMode.CONV_STRIDE and i == 0 else 1, 1, False, "ReLU;True", blocks.NormMode.BATCH), blocks.BlockConfig(3, 1, 1, False, None, blocks.NormMode.BATCH)], dim=dim))
+                in_channel = out_channel
+
+    def resNetEncoder(channels: list[int], layers: list[int], dim: int) -> list[torch.nn.Module]:
+        modules = []
+        modules.append(blocks.ConvBlock(channels[0], channels[1], [blocks.BlockConfig(7, 2, 3, False, "ReLU", blocks.NormMode.BATCH)], dim=dim))
+        for i, (in_channel, out_channel, layer) in enumerate(zip(channels[1:], channels[2:], layers)):
+            modules.append(UNetpp.ResNetEncoderLayer(in_channel, out_channel, layer, dim, blocks.DownSampleMode.MAXPOOL if i==0 else blocks.DownSampleMode.CONV_STRIDE))
+        return modules
 
-class NestedUNetHead(network.ModuleArgsDict):
+    class UNetPPBlock(network.ModuleArgsDict):
 
-    def __init__(self, in_channels: int, out_channels: int, nb_class: int, dim: int) -> None:
-        super().__init__()
-        self.add_module("Upsample", blocks.upSample(in_channels=in_channels, out_channels=out_channels, upSampleMode=blocks.UpSampleMode.UPSAMPLE_BILINEAR, dim=dim))
-        self.add_module("ConvBlock", blocks.ConvBlock(in_channels=in_channels, out_channels=out_channels, blockConfigs=[blocks.BlockConfig(3,1,1,False, "ReLU;True", blocks.NormMode.BATCH)]*2, dim=dim))
-        self.add_module("Conv", blocks.getTorchModule("Conv", dim)(in_channels = out_channels, out_channels = nb_class, kernel_size = 3, stride = 1, padding = 1))
-        if nb_class > 1:
-            self.add_module("Softmax", torch.nn.Softmax(dim=1))
-            self.add_module("Argmax", blocks.ArgMax(dim=1))
-        else:
-            self.add_module("Tanh", torch.nn.Tanh())
-
-class UNetpp(network.Network):
+        def __init__(self, encoder_channels: list[int], decoder_channels: list[int], encoders: list[torch.nn.Module], upSampleMode: blocks.UpSampleMode, dim: int, i : int = 0) -> None:
+            super().__init__()
+            self.add_module("X_{}_{}".format(i, 0), encoders[0], out_branch=["X_{}_{}".format(i, 0)])
+            if len(encoder_channels) > 2:
+                self.add_module("UNetBlock_{}".format(i+1), UNetpp.UNetPPBlock(encoder_channels[1:], decoder_channels[1:], encoders[1:], upSampleMode, dim, i+1), in_branch=["X_{}_{}".format(i, 0)], out_branch=["X_{}_{}".format(i+1, j) for j in range(len(encoder_channels)-2)])
+                for j in range(len(encoder_channels)-2):
+                    self.add_module("X_{}_{}_{}".format(i, j+1, upSampleMode.name), blocks.upSample(in_channels=encoder_channels[2], out_channels=decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1], upSampleMode=upSampleMode, dim=dim), in_branch=["X_{}_{}".format(i+1, j)], out_branch=["X_{}_{}".format(i+1, j)])
+                    self.add_module("SkipConnection_{}_{}".format(i, j+1), blocks.Concat(), in_branch=["X_{}_{}".format(i+1, j)]+["X_{}_{}".format(i, r) for r in range(j+1)], out_branch=["X_{}_{}".format(i, j+1)])
+                    self.add_module("X_{}_{}".format(i, j+1), blocks.ConvBlock(in_channels=encoder_channels[1]*j+((encoder_channels[1]+encoder_channels[2]) if upSampleMode != blocks.UpSampleMode.CONV_TRANSPOSE else (decoder_channels[1]+decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1]+encoder_channels[1])), out_channels=decoder_channels[2] if j == len(encoder_channels)-3 else encoder_channels[1], blockConfigs=[blocks.BlockConfig(3,1,1,False, "ReLU;True", blocks.NormMode.BATCH)]*2, dim=dim), in_branch=["X_{}_{}".format(i, j+1)], out_branch=["X_{}_{}".format(i, j+1)])
 
+    class UNetPPHead(network.ModuleArgsDict):
 
+        def __init__(self, in_channels: int, out_channels: int, nb_class: int, dim: int) -> None:
+            super().__init__()
+            self.add_module("Upsample", blocks.upSample(in_channels=in_channels, out_channels=out_channels, upSampleMode=blocks.UpSampleMode.UPSAMPLE_BILINEAR, dim=dim))
+            self.add_module("ConvBlock", blocks.ConvBlock(in_channels=in_channels, out_channels=out_channels, blockConfigs=[blocks.BlockConfig(3,1,1,False, "ReLU;True", blocks.NormMode.BATCH)]*2, dim=dim))
+            self.add_module("Conv", blocks.getTorchModule("Conv", dim)(in_channels = out_channels, out_channels = nb_class, kernel_size = 3, stride = 1, padding = 1))
+            if nb_class > 1:
+                self.add_module("Softmax", torch.nn.Softmax(dim=1))
+                self.add_module("Argmax", blocks.ArgMax(dim=1))
+            else:
+                self.add_module("Tanh", torch.nn.Tanh())
+
    def __init__( self,
                  optimizer : network.OptimizerLoader = network.OptimizerLoader(),
                  schedulers : network.LRSchedulersLoader = network.LRSchedulersLoader(),
@@ -111,5 +108,6 @@ class UNetpp(network.Network):
                  layers: list[int] = [3,4,6,3],
                  dim : int = 2) -> None:
        super().__init__(in_channels = encoder_channels[0], optimizer = optimizer, schedulers = schedulers, outputsCriterions = outputsCriterions, patch=patch, dim = dim)
-        self.add_module("Block_0", NestedUNetBlock(encoder_channels, decoder_channels[::-1], resNetEncoder(encoder_channels, layers, dim), blocks.UpSampleMode.UPSAMPLE_BILINEAR, dim=dim), out_branch=["X_0_{}".format(j+1) for j in range(len(encoder_channels)-2)])
-        self.add_module("Head", NestedUNetHead(in_channels=decoder_channels[-3], out_channels=decoder_channels[-2], nb_class=decoder_channels[-1], dim=dim), in_branch=["X_0_{}".format(len(encoder_channels)-2)], out_branch=[-1])
+        self.add_module("Block_0", UNetpp.UNetPPBlock(encoder_channels, decoder_channels[::-1], UNetpp.resNetEncoder(encoder_channels, layers, dim), blocks.UpSampleMode.UPSAMPLE_BILINEAR, dim=dim), out_branch=["X_0_{}".format(j+1) for j in range(len(encoder_channels)-2)])
+        self.add_module("Head", UNetpp.UNetPPHead(in_channels=decoder_channels[-3], out_channels=decoder_channels[-2], nb_class=decoder_channels[-1], dim=dim), in_branch=["X_0_{}".format(len(encoder_channels)-2)], out_branch=[-1])
+
konfai/models/segmentation/UNet.py CHANGED
@@ -12,7 +12,6 @@ class UNetHead(network.ModuleArgsDict):
        self.add_module("Conv", blocks.getTorchModule("Conv", dim)(in_channels = in_channels, out_channels = nb_class, kernel_size = 1, stride = 1, padding = 0))
        self.add_module("Softmax", torch.nn.Softmax(dim=1))
        self.add_module("Argmax", blocks.ArgMax(dim=1))
-
 
 class UNetBlock(network.ModuleArgsDict):
 
konfai/network/network.py CHANGED
@@ -16,7 +16,7 @@ from enum import Enum
 from konfai import KONFAI_ROOT
 from konfai.metric.schedulers import Scheduler
 from konfai.utils.config import config
-from konfai.utils.utils import State, _getModule, getDevice, getGPUMemory, MeasureError
+from konfai.utils.utils import State, _getModule, getDevice, getGPUMemory, MeasureError, TrainerError
 from konfai.data.patching import Accumulator, ModelPatch
 
 class NetState(Enum):
@@ -55,9 +55,16 @@ class LRSchedulersLoader():
 
    def getschedulers(self, key: str, optimizer: torch.optim.Optimizer) -> dict[torch.optim.lr_scheduler._LRScheduler, int]:
        schedulers : dict[torch.optim.lr_scheduler._LRScheduler, int] = {}
-        for name, step in self.params.items():
-            if name:
-                schedulers[config("Trainer.Model.{}.Schedulers.{}".format(key, name))(getattr(importlib.import_module('torch.optim.lr_scheduler'), name))(optimizer, config = None)] = step.nb_step
+        for nameTmp, step in self.params.items():
+            if nameTmp:
+                ok = False
+                for m in ["torch.optim.lr_scheduler", "konfai.metric.schedulers"]:
+                    module, name = _getModule(nameTmp, m)
+                    if hasattr(importlib.import_module(module), name):
+                        schedulers[config("{}.Model.{}.Schedulers.{}".format(KONFAI_ROOT(), key, name))(getattr(importlib.import_module(module), name))(optimizer, config = None)] = step.nb_step
+                        ok = True
+                if not ok:
+                    raise TrainerError("Unknown scheduler {}, tried importing from: 'torch.optim.lr_scheduler' and 'konfai.metric.schedulers', but no valid match was found. Check your YAML config or scheduler name spelling.".format(nameTmp))
        return schedulers
 
 class SchedulersLoader():
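The loader now tries `torch.optim.lr_scheduler` first and falls back to `konfai.metric.schedulers` (where the new `Warmup` lives), raising `TrainerError` when neither module defines the requested name. The underlying lookup pattern, sketched in isolation with a hypothetical `resolve` helper:

    import importlib

    def resolve(name: str, candidates: list[str]):
        for modpath in candidates:
            module = importlib.import_module(modpath)
            if hasattr(module, name):
                return getattr(module, name)  # first module that defines it wins
        raise ValueError(f"Unknown scheduler {name!r}, tried: {candidates}")

    cls = resolve("CosineAnnealingLR", ["torch.optim.lr_scheduler", "konfai.metric.schedulers"])
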
@@ -95,7 +102,7 @@ class CriterionsLoader():
    def getCriterions(self, model_classname : str, output_group : str, target_group : str) -> dict[torch.nn.Module, CriterionsAttr]:
        criterions = {}
        for module_classpath, criterionsAttr in self.criterionsLoader.items():
-            module, name = _getModule(module_classpath, "metric.measure")
+            module, name = _getModule(module_classpath, "konfai.metric.measure")
            criterionsAttr.isTorchCriterion = module.startswith("torch")
            criterionsAttr.sheduler = criterionsAttr.l.getschedulers("{}.Model.{}.outputsCriterions.{}.targetsCriterions.{}.criterionsLoader.{}".format(KONFAI_ROOT(), model_classname, output_group, target_group, module_classpath))
            criterions[config("{}.Model.{}.outputsCriterions.{}.targetsCriterions.{}.criterionsLoader.{}".format(KONFAI_ROOT(), model_classname, output_group, target_group, module_classpath))(getattr(importlib.import_module(module), name))(config = None)] = criterionsAttr
@@ -561,6 +568,7 @@ class Network(ModuleArgsDict, ABC):
        self.init_gain = init_gain
        self.dim = dim
        self._it = 0
+        self._nb_lr_update = 0
        self.outputsGroup : list[OutputsGroup]= []
 
    @_function_network()
@@ -655,9 +663,15 @@ class Network(ModuleArgsDict, ABC):
            else:
                model_state_dict[alias] = model_state_dict_tmp[alias]
        self.load_state_dict(model_state_dict)
-
-        if "{}_optimizer_state_dict".format(name) in state_dict and self.optimizer:
-            self.optimizer.load_state_dict(state_dict['{}_optimizer_state_dict'.format(name)])
+        if "{}_optimizer_state_dict".format(self.getName()) in state_dict and self.optimizer:
+            self.optimizer.load_state_dict(state_dict['{}_optimizer_state_dict'.format(self.getName())])
+        if "{}_it".format(self.getName()) in state_dict:
+            self._it = int(state_dict["{}_it".format(self.getName())])
+        if "{}_nb_lr_update".format(self.getName()) in state_dict:
+            self._nb_lr_update = int(state_dict["{}_nb_lr_update".format(self.getName())])
+        if self.schedulers:
+            for scheduler in self.schedulers:
+                scheduler.last_epoch = self._nb_lr_update
        self.initialized()
 
    def _compute_channels_trace(self, module : ModuleArgsDict, in_channels : int, gradient_checkpoints: Union[list[str], None], gpu_checkpoints: Union[list[str], None], name: Union[str, None] = None, in_is_channel = True, out_channels : Union[int, None] = None, out_is_channel = True) -> tuple[int, bool, int, bool]:
@@ -886,11 +900,12 @@ class Network(ModuleArgsDict, ABC):
 
    @_function_network()
    def update_lr(self):
+        self._nb_lr_update+=1
        step = 0
        scheduler = None
        if self.schedulers:
            for scheduler, value in self.schedulers.items():
-                if value is None or (self._it >= step and self._it < step+value):
+                if value is None or (self._nb_lr_update >= step and self._nb_lr_update < step+value):
                    break
                step += value
        if scheduler:
@@ -898,7 +913,7 @@ class Network(ModuleArgsDict, ABC):
        if self.measure:
            scheduler.step(sum(self.measure.getLastValues(0).values()))
        else:
-            scheduler.step()
+            scheduler.step()
 
    @_function_network()
    def getNetworks(self) -> Self:
@@ -916,6 +931,12 @@ class Network(ModuleArgsDict, ABC):
                v = Network.to(v, int(os.environ["device"]))
            else:
                v = v.to(getDevice(int(os.environ["device"])))
+        if isinstance(module, Network):
+            if module.optimizer is not None:
+                for state in module.optimizer.state.values():
+                    for k, v in state.items():
+                        if isinstance(v, torch.Tensor):
+                            state[k] = v.to(getDevice(int(os.environ["device"])))
        return module
 
    def getName(self) -> str:
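Moving an optimizer's internal state is easy to forget when relocating a model between devices; Adam's `exp_avg`/`exp_avg_sq` buffers, for instance, stay wherever they were created. The pattern added above, as a standalone hedged sketch:

    import torch

    def optimizer_to(optimizer: torch.optim.Optimizer, device: torch.device) -> None:
        # Move every tensor held in the optimizer state (momenta, running averages, ...)
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
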
@@ -944,7 +965,7 @@ class ModelLoader():
 
    @config("Model")
    def __init__(self, classpath : str = "default:segmentation.UNet.UNet") -> None:
-        self.module, self.name = _getModule(classpath, "models")
+        self.module, self.name = _getModule(classpath, "konfai.models")
 
    def getModel(self, train : bool = True, DL_args: Union[str, None] = None, DL_without=["optimizer", "schedulers", "nb_batch_per_step", "init_type", "init_gain"]) -> Network:
        if not DL_args:
konfai/predictor.py CHANGED
@@ -57,7 +57,7 @@ class OutDataset(Dataset, NeedDevice, ABC):
            transform_type.append(transform)
 
        if self._patchCombine is not None:
-            module, name = _getModule(self._patchCombine, "data.patching")
+            module, name = _getModule(self._patchCombine, "konfai.data.patching")
            self.patchCombine = getattr(importlib.import_module(module), name)(config = None, DL_args = "{}.outsDataset.{}.OutDataset".format(KONFAI_ROOT(), name_layer))
 
    def setPatchConfig(self, patchSize: Union[list[int], None], overlap: Union[int, None], nb_data_augmentation: int) -> None:
@@ -96,17 +96,21 @@ class Reduction():
    def __init__(self):
        pass
 
-class ReductionMean():
+class Mean(Reduction):
    def __init__(self):
        pass
 
-
-class ReductionMedian():
+
+    def __call__(self, result: torch.Tensor) -> torch.Tensor:
+        return torch.mean(result.float(), dim=0)
+
+class Median(Reduction):
 
    def __init__(self):
        pass
 
-
+    def __call__(self, result: torch.Tensor) -> torch.Tensor:
+        return torch.median(result.float(), dim=0).values
 
 
 class OutSameAsGroupDataset(OutDataset):
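The string flags "mean"/"median" from 1.1.5 are thus replaced by config-instantiated `Reduction` callables. What the two implementations compute, illustrated on toy data (values are made up):

    import torch

    preds = torch.stack([torch.ones(2, 2), torch.zeros(2, 2), torch.ones(2, 2)])
    print(torch.mean(preds.float(), dim=0))           # what Mean() returns
    print(torch.median(preds.float(), dim=0).values)  # what Median() returns
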
@@ -114,7 +118,7 @@ class OutSameAsGroupDataset(OutDataset):
    def __init__(self, dataset_filename: str = "./Dataset:mha", group: str = "default", sameAsGroup: str = "default", pre_transforms : dict[str, TransformLoader] = {"default:Normalize": TransformLoader()}, post_transforms : dict[str, TransformLoader] = {"default:Normalize": TransformLoader()}, final_transforms : dict[str, TransformLoader] = {"default:Normalize": TransformLoader()}, patchCombine: Union[str, None] = None, reduction: str = "mean", inverse_transform: bool = True) -> None:
        super().__init__(dataset_filename, group, pre_transforms, post_transforms, final_transforms, patchCombine)
        self.group_src, self.group_dest = sameAsGroup.split(":")
-        self.reduction = reduction
+        self.reduction_classpath = reduction
        self.inverse_transform = inverse_transform
 
    def addLayer(self, index_dataset: int, index_augmentation: int, index_patch: int, layer: torch.Tensor, dataset: DatasetIter):
@@ -142,7 +146,9 @@ class OutSameAsGroupDataset(OutDataset):
 
    def load(self, name_layer: str, datasets: list[Dataset], groups: dict[str, str]):
        super().load(name_layer, datasets, groups)
-
+        module, name = _getModule(self.reduction_classpath, "konfai.predictor")
+        self.reduction = config("{}.outsDataset.{}.OutDataset.{}".format(KONFAI_ROOT(), name_layer, self.reduction_classpath))(getattr(importlib.import_module(module), name))(config = None)
+
        if self.group_src not in groups.keys():
            raise PredictorError(
                f"Source group '{self.group_src}' not found. Available groups: {list(groups.keys())}."
@@ -179,14 +185,8 @@ class OutSameAsGroupDataset(OutDataset):
        result = torch.cat([self._getOutput(index, index_augmentation, dataset).unsqueeze(0) for index_augmentation in self.output_layer_accumulator[index].keys()], dim=0)
        name = self.names[index]
        self.output_layer_accumulator.pop(index)
-        dtype = result.dtype
+        result = self.reduction(result.float()).to(result.dtype)
 
-        if self.reduction == "mean":
-            result = torch.mean(result.float(), dim=0).to(dtype)
-        elif self.reduction == "median":
-            result, _ = torch.median(result.float(), dim=0)
-        else:
-            raise NameError("Reduction method does not exist (mean, median)")
        for transform in self.final_transforms:
            result = transform(name, result, self.attributes[index][0][0])
        return result
@@ -307,9 +307,9 @@ class _Predictor():
 
 class ModelComposite(Network):
 
-    def __init__(self, model: Network, nb_models: int, method: str):
+    def __init__(self, model: Network, nb_models: int, combine: Reduction):
        super().__init__(model.in_channels, model.optimizer, model.schedulers, model.outputsCriterionsLoader, model.patch, model.nb_batch_per_step, model.init_type, model.init_gain, model.dim)
-        self.method = method
+        self.combine = combine
        for i in range(nb_models):
            self.add_module("Model_{}".format(i), copy.deepcopy(model), in_branch=[0], out_branch=["output_{}".format(i)])
 
@@ -330,12 +330,7 @@ class ModelComposite(Network):
 
        final_outputs = []
        for key, tensors in aggregated.items():
-            stacked = torch.stack(tensors, dim=0)
-            if self.method == 'mean':
-                agg = torch.mean(stacked, dim=0)
-            elif self.method == 'median':
-                agg = torch.median(stacked, dim=0).values
-            final_outputs.append((key, agg))
+            final_outputs.append((key, self.combine(torch.stack(tensors, dim=0))))
 
        return final_outputs
 
@@ -358,7 +353,7 @@ class Predictor(DistributedObject):
        super().__init__(train_name)
        self.manual_seed = manual_seed
        self.dataset = dataset
-        self.combine = combine
+        self.combine_classpath = combine
        self.autocast = autocast
 
        self.model = model.getModel(train=False)
@@ -430,7 +425,10 @@ class Predictor(DistributedObject):
                "Available modules: {}".format(modules),
                "Please check that the name matches exactly a submodule or output of your model architecture."
            )
-        self.modelComposite = ModelComposite(self.model, len(MODEL().split(":")), self.combine)
+        module, name = _getModule(self.combine_classpath, "konfai.predictor")
+        combine = config("{}.{}".format(KONFAI_ROOT(), self.combine_classpath))(getattr(importlib.import_module(module), name))(config = None)
+
+        self.modelComposite = ModelComposite(self.model, len(MODEL().split(":")), combine)
        self.modelComposite.load(self._load())
 
        if len(list(self.outsDataset.keys())) == 0 and len([network for network in self.modelComposite.getNetworks().values() if network.measure is not None]) == 0:
@@ -108,9 +108,7 @@ class _Trainer():
108
108
  if data_log is not None:
109
109
  for data in data_log:
110
110
  self.data_log[data.split("/")[0].replace(":", ".")] = (DataLog.__getitem__(data.split("/")[1]).value[0], int(data.split("/")[2]))
111
-
112
-
113
-
111
+
114
112
  def __enter__(self):
115
113
  return self
116
114
 
@@ -212,6 +210,9 @@ class _Trainer():
            save_dict["Model_EMA"] = self.modelEMA.module.state_dict()
 
        save_dict.update({'{}_optimizer_state_dict'.format(name): network.optimizer.state_dict() for name, network in self.model.module.getNetworks().items() if network.optimizer is not None})
+        save_dict.update({'{}_it'.format(name): network._it for name, network in self.model.module.getNetworks().items() if network.optimizer is not None})
+        save_dict.update({'{}_nb_lr_update'.format(name): network._nb_lr_update for name, network in self.model.module.getNetworks().items() if network.optimizer is not None})
+
        torch.save(save_dict, save_path)
 
        if self.save_checkpoint_mode == "BEST":
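Checkpoints now carry, per network, the optimizer state plus the `_it` and `_nb_lr_update` counters, which `Network.load` (see network.py above) uses to re-synchronise schedulers via `scheduler.last_epoch` on resume. A hedged sketch of the resulting checkpoint layout, with toy stand-ins for the real objects:

    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    it, nb_lr_update = 100, 5  # hypothetical counter values

    save_dict = {
        "Model_optimizer_state_dict": optimizer.state_dict(),
        "Model_it": it,                      # training iteration counter
        "Model_nb_lr_update": nb_lr_update,  # number of update_lr() calls
    }
    torch.save(save_dict, "checkpoint.pt")
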
@@ -356,7 +357,7 @@ class Trainer(DistributedObject):
            name = sorted(os.listdir(path))[-1]
 
        if os.path.exists(path+name):
-            state_dict = torch.load(path+name, weights_only=False)
+            state_dict = torch.load(path+name, weights_only=False, map_location="cpu")
        else:
            raise Exception("Model : {} does not exist !".format(self.name))
 
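Passing `map_location="cpu"` makes `torch.load` deserialise GPU-saved tensors onto the CPU first, so checkpoints load on machines without the original device (tensors are moved back to the right device afterwards). Minimal form, assuming `checkpoint.pt` exists:

    import torch

    # Note: weights_only=False still unpickles arbitrary objects,
    # so only load checkpoints from trusted sources.
    state_dict = torch.load("checkpoint.pt", weights_only=False, map_location="cpu")
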
konfai/utils/utils.py CHANGED
@@ -37,7 +37,7 @@ def _getModule(classpath : str, type : str) -> tuple[str, str]:
        module = ".".join(classpath.split(":")[:-1])
        name = classpath.split(":")[-1]
    else:
-        module = "konfai."+type+("." if len(classpath.split(".")) > 2 else "")+".".join(classpath.split(".")[:-1])
+        module = type+("." if len(classpath.split(".")) > 2 else "")+".".join(classpath.split(".")[:-1])
        name = classpath.split(".")[-1]
    return module, name
 
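This `_getModule` change is the root of the many "konfai." edits across this release: the helper no longer prepends the `konfai.` prefix itself, so every caller now passes a fully qualified default package. Its behaviour after the change, sketched as a renamed standalone copy (hypothetical `get_module`):

    def get_module(classpath: str, default_pkg: str) -> tuple[str, str]:
        if ":" in classpath:  # explicit "pkg.module:Class" form
            return ".".join(classpath.split(":")[:-1]), classpath.split(":")[-1]
        parts = classpath.split(".")
        # default_pkg is now used verbatim instead of "konfai." + type
        module = default_pkg + ("." if len(parts) > 2 else "") + ".".join(parts[:-1])
        return module, parts[-1]

    print(get_module("Clip", "konfai.data.transform"))  # ('konfai.data.transform', 'Clip')
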
konfai-1.1.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: konfai
-Version: 1.1.5
+Version: 1.1.6
 Summary: Modular and configurable Deep Learning framework with YAML and PyTorch
 Author-email: Valentin Boussot <boussot.v@gmail.com>
 License-Expression: Apache-2.0
konfai-1.1.6.dist-info/RECORD CHANGED
@@ -1,16 +1,16 @@
 konfai/__init__.py,sha256=YXG-wpSEXWs6Jt3BDI77V4r89gEUNX-6lxW9btj5VYI,851
-konfai/evaluator.py,sha256=G8tXI-g8yTxBZXmESpRW7B5UEI4SMXq47fsGksU0G0M,8394
-konfai/main.py,sha256=xhCIs3VKXxNCgyyN616K-VANEFCsZZRR-l8LIMHlmPw,2597
-konfai/predictor.py,sha256=bNkNBa_9pvbAKt8xrmAdfUnnPIerG0_NC1zOCCvSdGk,23973
-konfai/trainer.py,sha256=mxzBDHMQR7bvpoW8WHpfAzzqFnxIApkDx5raf7GwTxw,20295
+konfai/evaluator.py,sha256=WM78NGydV0iqKElahr-WpOCZLEJXHjjPq-LS-gi23rk,8401
+konfai/main.py,sha256=kr7Iie_f67NF6G3dAAj9G6Z9dhn9RzbdLYpzy2WvIh8,2573
+konfai/predictor.py,sha256=kqAF3P4BAOZvjHtBAfv2M4-yonqAOcnIV7APgxdRsww,24283
+konfai/trainer.py,sha256=Edi8l8OOfCewYM-Cd5C5rCqCaprvlfxzohd4iLkK5u0,20632
 konfai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-konfai/data/augmentation.py,sha256=ruvS7ykFveOZBe2Mv_TOU5a8AzUgH7gfTipwqYgRM7k,32293
-konfai/data/data_manager.py,sha256=8Te3R39K64ucy3_mWH7AbIurNrmFRwRUkUBY0OBsss4,30107
+konfai/data/augmentation.py,sha256=SD8m0uLz5aQYtjAjns0H04WgrDWSN6nd7lWlNnT2fi8,32300
+konfai/data/data_manager.py,sha256=eYgFJjHr9HEpRLTtLabUZLPH4c82OQCiGuYQ84ktWCE,30076
 konfai/data/patching.py,sha256=zs3T4yTV8_iCFrqO21bo6GhwTywoTxIL31IAi-jiJDQ,15478
-konfai/data/transform.py,sha256=ZZoxft0V8t-DaKjF3OsIFezz41B8_xwPqBvYCYwF0dk,26471
+konfai/data/transform.py,sha256=LfdsNUE5_6mIGrVzI6B2N2WO-jaNzFmuMxhP1Y0Fbwk,26538
 konfai/metric/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-konfai/metric/measure.py,sha256=PwNbR9Jb07gNmTtvL93MhM57LzUWrrZ-78bhCWcGSP0,22881
-konfai/metric/schedulers.py,sha256=UoSr1TW_hrus3DhvOEbDefxCSUGz7lJS_8vbz0GEye8,1370
+konfai/metric/measure.py,sha256=j-rEsNgVMPjYFWtahYEaiMSDnyD5Dv8edx5KEt2Rj2U,22797
+konfai/metric/schedulers.py,sha256=eZIlJMOQH8AV48F5hiRFbmONmoY4hHMujvS3qgFYXbw,1771
 konfai/models/classification/convNeXt.py,sha256=Phj1hO8TCItVhBXoFXQcIkA85DZxurGLyHIPWBfXJ0Y,9243
 konfai/models/classification/resnet.py,sha256=t8KJEgGudWBpGJM9YS1lH_5eX0-wGbK7ZW5uZW214eM,7958
 konfai/models/generation/cStyleGan.py,sha256=PwaVQX9h5u_8YbbxA7sz7inzIEoMadinuo0kFFNqIn0,8042
@@ -20,20 +20,20 @@ konfai/models/generation/gan.py,sha256=-GoKxHm3W9NdD4U77UcJrG5TfOZ3NWFUZG663kt2X
 konfai/models/generation/vae.py,sha256=_3JYVT2ojZ0P98tYcD2ny7a-gWVUmnByLDhY7i-n_4g,4719
 konfai/models/registration/registration.py,sha256=18EiWt4RJIXLyFtqU-kHjV1sMnQRm9mxAA6_-2B1YqI,6313
 konfai/models/representation/representation.py,sha256=RwQYoxtdph440-t_ZLelykl0hkUAD1zdspQaLkgxb-0,2677
-konfai/models/segmentation/NestedUNet.py,sha256=kF8uXmWdg4MOLTwzY4Zqq1FgWdwzT1I8mLAqi-XnuH8,9958
-konfai/models/segmentation/UNet.py,sha256=Uei51-_0Le24ZJs9tbuL6hQn66kigkfbaLSMlH38els,4045
+konfai/models/segmentation/NestedUNet.py,sha256=D2qChm5SSYqV7FayCe9eT260vxckxRfBOCS1US6xcnw,10184
+konfai/models/segmentation/UNet.py,sha256=TKPmhHEnlOYfnUF2Qof3VSETl5nEIh9h0SkblBRdbbg,4036
 konfai/network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/network/blocks.py,sha256=P7mEuuE1B0HRSgXRCeLxZvzYGKLskHD33hskpdkjIPs,14370
-konfai/network/network.py,sha256=6DBxTehY6McTaEHl6QzVfIZf2pe69KFqktYkefe3sj8,47521
+konfai/network/network.py,sha256=zszV7z6tyZpZtWb9SHXqH1pJlvdy9YB4yrf7Muv_HiM,48936
 konfai/utils/ITK.py,sha256=OxTieDNNYHGkn7zxJsAG-6ecRG1VYMvn1dlBbBe1DOs,13955
 konfai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/utils/config.py,sha256=f7o83ix5_oNbr2pki-Czqr-yHi-8n92ZL64nlo0XGwA,12514
 konfai/utils/dataset.py,sha256=6ZzevdhJ7e5zlXATAVwSh9O6acKXM7gYNxkMAa5DrmM,36351
 konfai/utils/registration.py,sha256=v1srEBOcgDnHrx0YtsK6bcj0yCMH7wNeaQ3wC7gEvOw,8898
-konfai/utils/utils.py,sha256=EVikQCUnZy7Va-53E4plX9gY9aFancOoHd2NkT4Uy1k,23585
-konfai-1.1.5.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-konfai-1.1.5.dist-info/METADATA,sha256=DmiQixYnw4ZAb6J1A9C_itL76GGLH3GNeQAa7XqjcGY,2515
-konfai-1.1.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-konfai-1.1.5.dist-info/entry_points.txt,sha256=fG82HRN5-g39ACSOCtij_I3N6EHxfYnMR0D7TI_8pW8,81
-konfai-1.1.5.dist-info/top_level.txt,sha256=xF470dkIlFoFqTZEOlRehKJr4WU_8OKGXrJqYm9vWKs,7
-konfai-1.1.5.dist-info/RECORD,,
+konfai/utils/utils.py,sha256=1M46MYSZuPrbH5ihu8WZHaQNfqmvfeVsH-mAk0ED5TI,23575
+konfai-1.1.6.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+konfai-1.1.6.dist-info/METADATA,sha256=wU8CbR5KO85P9dY3asD_ylG5rMCFIAQmNL0ifTDXA8g,2515
+konfai-1.1.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+konfai-1.1.6.dist-info/entry_points.txt,sha256=fG82HRN5-g39ACSOCtij_I3N6EHxfYnMR0D7TI_8pW8,81
+konfai-1.1.6.dist-info/top_level.txt,sha256=xF470dkIlFoFqTZEOlRehKJr4WU_8OKGXrJqYm9vWKs,7
+konfai-1.1.6.dist-info/RECORD,,