konfai 1.1.8-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of konfai might be problematic.
- konfai/__init__.py +59 -14
- konfai/data/augmentation.py +457 -286
- konfai/data/data_manager.py +533 -316
- konfai/data/patching.py +300 -183
- konfai/data/transform.py +408 -275
- konfai/evaluator.py +325 -68
- konfai/main.py +71 -22
- konfai/metric/measure.py +360 -244
- konfai/metric/schedulers.py +24 -13
- konfai/models/classification/convNeXt.py +187 -81
- konfai/models/classification/resnet.py +272 -58
- konfai/models/generation/cStyleGan.py +233 -59
- konfai/models/generation/ddpm.py +348 -121
- konfai/models/generation/diffusionGan.py +757 -358
- konfai/models/generation/gan.py +177 -53
- konfai/models/generation/vae.py +140 -40
- konfai/models/registration/registration.py +135 -52
- konfai/models/representation/representation.py +57 -23
- konfai/models/segmentation/NestedUNet.py +339 -68
- konfai/models/segmentation/UNet.py +140 -30
- konfai/network/blocks.py +331 -187
- konfai/network/network.py +795 -427
- konfai/predictor.py +644 -238
- konfai/trainer.py +509 -222
- konfai/utils/ITK.py +191 -106
- konfai/utils/config.py +152 -95
- konfai/utils/dataset.py +326 -455
- konfai/utils/utils.py +497 -249
- {konfai-1.1.8.dist-info → konfai-1.2.0.dist-info}/METADATA +1 -3
- konfai-1.2.0.dist-info/RECORD +38 -0
- konfai/utils/registration.py +0 -199
- konfai-1.1.8.dist-info/RECORD +0 -39
- {konfai-1.1.8.dist-info → konfai-1.2.0.dist-info}/WHEEL +0 -0
- {konfai-1.1.8.dist-info → konfai-1.2.0.dist-info}/entry_points.txt +0 -0
- {konfai-1.1.8.dist-info → konfai-1.2.0.dist-info}/licenses/LICENSE +0 -0
- {konfai-1.1.8.dist-info → konfai-1.2.0.dist-info}/top_level.txt +0 -0
konfai/metric/schedulers.py
CHANGED
@@ -1,15 +1,16 @@
-import torch
-import numpy as np
 from abc import abstractmethod
-from konfai.utils.config import config
 from functools import partial
 
-
+import numpy as np
+import torch
+
+
+class Scheduler:
 
     def __init__(self, start_value: float) -> None:
         self.baseValue = float(start_value)
         self.it = 0
-
+
     def step(self, it: int):
         self.it = it
 
@@ -17,6 +18,7 @@ class Scheduler():
     def get_value(self) -> float:
         pass
 
+
 class Constant(Scheduler):
 
     def __init__(self, value: float = 1):
@@ -25,20 +27,29 @@ class Constant(Scheduler):
     def get_value(self) -> float:
         return self.baseValue
 
+
 class CosineAnnealing(Scheduler):
-
-    def __init__(self, start_value: float = 1, eta_min: float = 0.00001,
+
+    def __init__(self, start_value: float = 1, eta_min: float = 0.00001, t_max: int = 100):
         super().__init__(start_value)
         self.eta_min = eta_min
-        self.
+        self.t_max = t_max
 
     def get_value(self):
-        return self.eta_min + (self.baseValue - self.eta_min) *(1 + np.cos(self.it * torch.pi / self.
+        return self.eta_min + (self.baseValue - self.eta_min) * (1 + np.cos(self.it * torch.pi / self.t_max)) / 2
+
 
 class Warmup(torch.optim.lr_scheduler.LambdaLR):
-
-    def warmup(warmup_steps: int, step: int) -> float:
-        return min(1.0, (step+1) / (warmup_steps+1))
 
-
+    @staticmethod
+    def warmup(warmup_steps: int, step: int) -> float:
+        return min(1.0, (step + 1) / (warmup_steps + 1))
+
+    def __init__(
+        self,
+        optimizer: torch.optim.Optimizer,
+        warmup_steps: int = 10,
+        last_epoch=-1,
+        verbose="deprecated",
+    ):
         super().__init__(optimizer, partial(Warmup.warmup, warmup_steps), last_epoch, verbose)
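A minimal usage sketch, assuming only the signatures visible in this diff (CosineAnnealing(start_value, eta_min, t_max) driven by step(it)/get_value(), and Warmup wrapping a torch optimizer as a LambdaLR); it is illustrative, not taken from konfai's own documentation:

    import torch
    from konfai.metric.schedulers import CosineAnnealing, Warmup

    # Cosine decay of a criterion weight from 1.0 towards eta_min over t_max iterations.
    weight = CosineAnnealing(start_value=1.0, eta_min=1e-5, t_max=100)
    for it in range(100):
        weight.step(it)
        w = weight.get_value()  # eta_min + (base - eta_min) * (1 + cos(pi * it / t_max)) / 2

    # Linear learning-rate warmup over the first 10 optimizer steps.
    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    warmup = Warmup(optimizer, warmup_steps=10)
    for _ in range(20):
        optimizer.step()
        warmup.step()  # scales the base lr by min(1, (step + 1) / (warmup_steps + 1))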
konfai/models/classification/convNeXt.py
CHANGED
@@ -1,33 +1,44 @@
 import torch
-import torch.nn.functional as F
-
-from konfai.utils.config import config
+import torch.nn.functional as F  # noqa: N812
+
 from konfai.data.patching import ModelPatch
+from konfai.network import blocks, network
 
 """
-"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
-
-"
-
+"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
+depths=[3, 3, 9, 3], dims=[96, 192, 384, 768]
+"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
+depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]
+"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
+depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]
+"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
+[3, 3, 27, 3], dims=[192, 384, 768, 1536]
 "convnext_tiny_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
 "convnext_small_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
 "convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
 "convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
-"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
+"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
+depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]
 """
 
+
 class LayerNorm(torch.nn.Module):
 
-    def __init__(
+    def __init__(
+        self,
+        normalized_shape: int,
+        eps: float = 1e-6,
+        data_format: str = "channels_last",
+    ):
         super().__init__()
         self.weight = torch.nn.parameter.Parameter(torch.ones(normalized_shape))
         self.bias = torch.nn.parameter.Parameter(torch.zeros(normalized_shape))
         self.eps = eps
         self.data_format = data_format
         if self.data_format not in ["channels_last", "channels_first"]:
-            raise NotImplementedError
-        self.normalized_shape = (normalized_shape,
-
+            raise NotImplementedError
+        self.normalized_shape = (normalized_shape,)
+
     def forward(self, x):
         if self.data_format == "channels_last":
             return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
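The updated module docstring now pairs each pretrained checkpoint with its depths/dims configuration (the constructor at the end of this file calls the latter `widths`). A hypothetical lookup table, not part of konfai, restating the same values as data:

    # Values copied from the docstring above; the dict itself is only illustrative.
    CONVNEXT_VARIANTS = {
        "convnext_tiny_1k": {"depths": [3, 3, 9, 3], "widths": [96, 192, 384, 768]},
        "convnext_small_1k": {"depths": [3, 3, 27, 3], "widths": [96, 192, 384, 768]},
        "convnext_base_1k": {"depths": [3, 3, 27, 3], "widths": [128, 256, 512, 1024]},
        "convnext_large_1k": {"depths": [3, 3, 27, 3], "widths": [192, 384, 768, 1536]},
        "convnext_xlarge_22k": {"depths": [3, 3, 27, 3], "widths": [256, 512, 1024, 2048]},
    }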
@@ -44,17 +55,18 @@ class LayerNorm(torch.nn.Module):
         return x
 
     def extra_repr(self):
-        return "normalized_shape={}, eps={}, data_format={
+        return f"normalized_shape={self.normalized_shape}, eps={self.eps}, data_format={self.data_format})"
+
 
 class DropPath(torch.nn.Module):
 
-    def __init__(self, drop_prob: float = 0
-        super(
+    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
+        super().__init__()
         self.drop_prob = drop_prob
         self.scale_by_keep = scale_by_keep
 
     def forward(self, x):
-        if self.drop_prob == 0. or not self.training:
+        if self.drop_prob == 0.0 or not self.training:
             return x
         keep_prob = 1 - self.drop_prob
         shape = (x.shape[0],) + (1,) * (x.ndim - 1)
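The lines elided between these two hunks (old 61-63 / new 73-75) build the per-sample mask that `forward` multiplies in below. A self-contained sketch of the standard stochastic-depth formulation this class follows; the masking details are an assumption, since they are not shown in the diff:

    import torch

    def drop_path(x: torch.Tensor, drop_prob: float = 0.1, scale_by_keep: bool = True) -> torch.Tensor:
        # Zero out whole samples with probability drop_prob and, optionally,
        # rescale the survivors so the expected activation is unchanged.
        keep_prob = 1.0 - drop_prob
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one mask value per sample
        random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
        if keep_prob > 0.0 and scale_by_keep:
            random_tensor.div_(keep_prob)
        return x * random_tensor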
@@ -64,111 +76,205 @@ class DropPath(torch.nn.Module):
         return x * random_tensor
 
     def extra_repr(self):
-        return "drop_prob={
+        return f"drop_prob={round(self.drop_prob, 3)}"
+
 
 class LayerScaler(torch.nn.Module):
-
-    def __init__(self, init_value
+
+    def __init__(self, init_value: float, dimensions: int):
         super().__init__()
         self.init_value = init_value
         self.gamma = torch.nn.Parameter(torch.ones(dimensions, 1, 1) * init_value)
 
-    def forward(self,
-        return self.gamma *
+    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
+        return self.gamma * tensor
 
     def extra_repr(self):
-        return "init_value={
+        return f"init_value={self.init_value}"
+
 
 class BottleNeckBlock(network.ModuleArgsDict):
-    def __init__(
-        features: int,
-        drop_p: float,
-        layer_scaler_init_value: float,
-        dim: int):
+    def __init__(self, features: int, drop_p: float, layer_scaler_init_value: float, dim: int):
         super().__init__()
-        self.add_module(
+        self.add_module(
+            "Conv_0",
+            blocks.get_torch_module("Conv", dim)(features, features, kernel_size=7, padding=3, groups=features),
+            alias=["dwconv"],
+        )
         self.add_module("ToFeatures", blocks.ToFeatures(dim))
         self.add_module("LayerNorm", LayerNorm(features, eps=1e-6), alias=["norm"])
-        self.add_module("Linear_1",
+        self.add_module("Linear_1", torch.nn.Linear(features, features * 4), alias=["pwconv1"])
         self.add_module("GELU", torch.nn.GELU())
         self.add_module("Linear_2", torch.nn.Linear(features * 4, features), alias=["pwconv2"])
         self.add_module("ToChannels", blocks.ToChannels(dim))
-        self.add_module(
+        self.add_module(
+            "LayerScaler",
+            LayerScaler(init_value=layer_scaler_init_value, dimensions=features),
+            alias=[""],
+        )
         self.add_module("StochasticDepth", DropPath(drop_p))
-        self.add_module("Residual", blocks.Add(), in_branch=[0,1])
-
+        self.add_module("Residual", blocks.Add(), in_branch=[0, 1])
+
+
 class DownSample(network.ModuleArgsDict):
-
-    def __init__(
-        in_features: int,
-        out_features: int,
-        dim : int):
+
+    def __init__(self, in_features: int, out_features: int, dim: int):
         super().__init__()
-        self.add_module(
-
+        self.add_module(
+            "LayerNorm",
+            LayerNorm(in_features, eps=1e-6, data_format="channels_first"),
+            alias=["0"],
+        )
+        self.add_module(
+            "Conv",
+            blocks.get_torch_module("Conv", dim)(in_features, out_features, kernel_size=2, stride=2),
+            alias=["1"],
+        )
+
 
 class ConvNexStage(network.ModuleArgsDict):
-
-    def __init__(
-        features: int,
-        depth: int,
-        drop_p: list[float],
-        dim : int):
+
+    def __init__(self, features: int, depth: int, drop_p: list[float], dim: int):
         super().__init__()
         for i in range(depth):
-            self.add_module(
+            self.add_module(
+                f"BottleNeckBlock_{i}",
+                BottleNeckBlock(
+                    features=features,
+                    drop_p=drop_p[i],
+                    layer_scaler_init_value=1e-6,
+                    dim=dim,
+                ),
+                alias=[f"{i}"],
+            )
+
 
 class ConvNextStem(network.ModuleArgsDict):
 
     def __init__(self, in_features: int, out_features: int, dim: int):
         super().__init__()
-        self.add_module(
-
+        self.add_module(
+            "Conv",
+            blocks.get_torch_module("Conv", dim)(in_features, out_features, kernel_size=4, stride=4),
+            alias=["0"],
+        )
+        self.add_module(
+            "LayerNorm",
+            LayerNorm(out_features, eps=1e-6, data_format="channels_first"),
+            alias=["1"],
+        )
+
 
 class ConvNextEncoder(network.ModuleArgsDict):
 
-    def __init__(
-
-
-
-
-
+    def __init__(
+        self,
+        in_channels: int,
+        depths: list[int],
+        widths: list[int],
+        drop_p: float,
+        dim: int,
+    ):
         super().__init__()
-        self.add_module(
-
+        self.add_module(
+            "ConvNextStem",
+            ConvNextStem(in_channels, widths[0], dim=dim),
+            alias=["downsample_layers.0"],
+        )
+
         drop_probs = [x.item() for x in torch.linspace(0, drop_p, sum(depths))]
-        self.add_module(
-
+        self.add_module(
+            "ConvNexStage_0",
+            ConvNexStage(
+                features=widths[0],
+                depth=depths[0],
+                drop_p=drop_probs[: depths[0]],
+                dim=dim,
+            ),
+            alias=["stages.0"],
+        )
+
         for i, (in_features, out_features) in enumerate(list(zip(widths[:], widths[1:]))):
-            self.add_module(
-
+            self.add_module(
+                f"DownSample_{i + 1}",
+                DownSample(in_features=in_features, out_features=out_features, dim=dim),
+                alias=[f"downsample_layers.{i + 1}"],
+            )
+            self.add_module(
+                f"ConvNexStage_{i + 1}",
+                ConvNexStage(
+                    features=out_features,
+                    depth=depths[i + 1],
+                    drop_p=drop_probs[sum(depths[: i + 1]) : sum(depths[: i + 2])],
+                    dim=dim,
+                ),
+                alias=[f"stages.{i + 1}"],
+            )
+
 
 class Head(network.ModuleArgsDict):
 
-    def __init__(self, in_features
+    def __init__(self, in_features: int, num_classes: list[int], dim: int) -> None:
         super().__init__()
-        self.add_module(
+        self.add_module(
+            "AdaptiveAvgPool",
+            blocks.get_torch_module("AdaptiveAvgPool", dim)(tuple([1] * dim)),
+        )
         self.add_module("Flatten", torch.nn.Flatten(1))
         self.add_module("LayerNorm", torch.nn.LayerNorm(in_features, eps=1e-6), alias=["norm"])
-
+
         for i, nb_classe in enumerate(num_classes):
-            self.add_module(
-
+            self.add_module(
+                f"Linear_{i}",
+                torch.nn.Linear(in_features, nb_classe),
+                pretrained=False,
+                alias=["head"],
+                out_branch=[i + 1],
+            )
+            self.add_module(
+                f"Unsqueeze_{i}",
+                blocks.Unsqueeze(2),
+                in_branch=[i + 1],
+                out_branch=[-1],
+            )
+
 
 class ConvNeXt(network.Network):
-
-    def __init__(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    def __init__(
+        self,
+        optimizer: network.OptimizerLoader = network.OptimizerLoader(),
+        schedulers: dict[str, network.LRSchedulersLoader] = {
+            "default:ReduceLROnPlateau": network.LRSchedulersLoader(0)
+        },
+        outputs_criterions: dict[str, network.TargetCriterionsLoader] = {"default": network.TargetCriterionsLoader()},
+        patch: ModelPatch = ModelPatch(),
+        dim: int = 3,
+        in_channels: int = 1,
+        depths: list[int] = [3, 3, 27, 3],
+        widths: list[int] = [128, 256, 512, 1024],
+        drop_p: float = 0.1,
+        num_classes: list[int] = [4, 7],
+    ):
+
+        super().__init__(
+            in_channels=in_channels,
+            optimizer=optimizer,
+            schedulers=schedulers,
+            outputs_criterions=outputs_criterions,
+            dim=dim,
+            patch=patch,
+            init_type="trunc_normal",
+            init_gain=0.02,
+        )
+        self.add_module(
+            "ConvNextEncoder",
+            ConvNextEncoder(
+                in_channels=in_channels,
+                depths=depths,
+                widths=widths,
+                drop_p=drop_p,
+                dim=dim,
+            ),
+        )
+        self.add_module("Head", Head(in_features=widths[-1], num_classes=num_classes, dim=dim))