lt-tensor 0.0.1a33__py3-none-any.whl → 0.0.1a35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lt_tensor/__init__.py +1 -1
- lt_tensor/losses.py +169 -47
- lt_tensor/lr_schedulers.py +147 -21
- lt_tensor/misc_utils.py +35 -42
- lt_tensor/model_zoo/activations/__init__.py +3 -0
- lt_tensor/model_zoo/activations/alias_free/__init__.py +3 -0
- lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/act.py +8 -6
- lt_tensor/model_zoo/activations/snake/__init__.py +41 -43
- lt_tensor/model_zoo/audio_models/__init__.py +2 -2
- lt_tensor/model_zoo/audio_models/bigvgan/__init__.py +243 -0
- lt_tensor/model_zoo/audio_models/hifigan/__init__.py +16 -347
- lt_tensor/model_zoo/audio_models/istft/__init__.py +14 -349
- lt_tensor/model_zoo/audio_models/resblocks.py +248 -0
- lt_tensor/model_zoo/convs.py +21 -32
- lt_tensor/model_zoo/losses/discriminators.py +143 -230
- {lt_tensor-0.0.1a33.dist-info → lt_tensor-0.0.1a35.dist-info}/METADATA +1 -1
- lt_tensor-0.0.1a35.dist-info/RECORD +40 -0
- lt_tensor/model_zoo/activations/alias_free_torch/__init__.py +0 -1
- lt_tensor-0.0.1a33.dist-info/RECORD +0 -37
- /lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/filter.py +0 -0
- /lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/resample.py +0 -0
- {lt_tensor-0.0.1a33.dist-info → lt_tensor-0.0.1a35.dist-info}/WHEEL +0 -0
- {lt_tensor-0.0.1a33.dist-info → lt_tensor-0.0.1a35.dist-info}/licenses/LICENSE +0 -0
- {lt_tensor-0.0.1a33.dist-info → lt_tensor-0.0.1a35.dist-info}/top_level.txt +0 -0
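
Taken together, the file list points to two structural changes: the activation package `alias_free_torch` is renamed to `alias_free`, and the residual blocks shared by the vocoder generators move into a new `audio_models/resblocks.py` module. A minimal import sketch under that reading (paths taken from the file list and the diffs below, not from package documentation):

```python
# Sketch only: import paths inferred from the 0.0.1a35 file layout above.
# The shared residual blocks now live in one module...
from lt_tensor.model_zoo.audio_models.resblocks import ResBlock1, ResBlock2
# ...and the alias-free activation package drops the "_torch" suffix.
from lt_tensor.model_zoo.activations import alias_free, snake
```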
@@ -2,10 +2,9 @@ __all__ = ["iSTFTNetGenerator", "iSTFTNetConfig"]
 from lt_utils.common import *
 from lt_tensor.torch_commons import *
 from lt_tensor.model_zoo.convs import ConvNets
-from torch.nn import functional as F
 from lt_tensor.config_templates import ModelConfig
-from
-from
+from lt_utils.file_ops import is_file
+from lt_tensor.model_zoo.audio_models.resblocks import ResBlock1, ResBlock2
 
 
 class iSTFTNetConfig(ModelConfig):
@@ -22,6 +21,7 @@ class iSTFTNetConfig(ModelConfig):
     ]
 
     activation: nn.Module = nn.LeakyReLU(0.1)
+    resblock_activation: nn.Module = nn.LeakyReLU(0.1)
     resblock: int = 0
     gen_istft_n_fft: int = 16
     sampling_rate: Number = 24000
@@ -39,7 +39,8 @@ class iSTFTNetConfig(ModelConfig):
             [1, 3, 5],
         ],
         activation: nn.Module = nn.LeakyReLU(0.1),
-
+        resblock_activation: nn.Module = nn.LeakyReLU(0.1),
+        resblock: int = 0,
         gen_istft_n_fft: int = 16,
         sampling_rate: Number = 24000,
         *args,
@@ -53,6 +54,7 @@ class iSTFTNetConfig(ModelConfig):
             "resblock_kernel_sizes": resblock_kernel_sizes,
             "resblock_dilation_sizes": resblock_dilation_sizes,
             "activation": activation,
+            "resblock_activation": resblock_activation,
             "resblock": resblock,
             "gen_istft_n_fft": gen_istft_n_fft,
             "sampling_rate": sampling_rate,
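
The two config hunks above add a `resblock_activation` field next to the existing `activation`, both defaulting to `nn.LeakyReLU(0.1)`. A hypothetical construction sketch (argument values are illustrative, not taken from package docs):

```python
from torch import nn
from lt_tensor.model_zoo.audio_models.istft import iSTFTNetConfig, iSTFTNetGenerator

# Sketch: the new field is what gets forwarded into each residual block
# (see the generator hunk below), while `activation` keeps driving the
# generator's own up-sampling path.
cfg = iSTFTNetConfig(
    activation=nn.LeakyReLU(0.1),
    resblock_activation=nn.LeakyReLU(0.1),  # new in 0.0.1a35
    resblock=0,  # selects which ResBlock variant the generator builds
    gen_istft_n_fft=16,
    sampling_rate=24000,
)
model = iSTFTNetGenerator(cfg)
```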
@@ -62,134 +64,6 @@ class iSTFTNetConfig(ModelConfig):
     def post_process(self):
         if isinstance(self.resblock, str):
             self.resblock = 0 if self.resblock == "1" else 1
-
-
-def get_padding(ks, d):
-    return int((ks * d - d) / 2)
-
-
-class ResBlock1(ConvNets):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-        super().__init__()
-        self.convs1 = nn.ModuleList(
-            [
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=dilation[0],
-                        padding=get_padding(kernel_size, dilation[0]),
-                    )
-                ),
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=dilation[1],
-                        padding=get_padding(kernel_size, dilation[1]),
-                    )
-                ),
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=dilation[2],
-                        padding=get_padding(kernel_size, dilation[2]),
-                    )
-                ),
-            ]
-        )
-        self.convs1.apply(self.init_weights)
-
-        self.convs2 = nn.ModuleList(
-            [
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=1,
-                        padding=get_padding(kernel_size, 1),
-                    )
-                ),
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=1,
-                        padding=get_padding(kernel_size, 1),
-                    )
-                ),
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=1,
-                        padding=get_padding(kernel_size, 1),
-                    )
-                ),
-            ]
-        )
-        self.activation = nn.LeakyReLU(0.1)
-        self.convs2.apply(self.init_weights)
-
-    def forward(self, x):
-        for c1, c2 in zip(self.convs1, self.convs2):
-            xt = self.activation(x)
-            xt = c1(xt)
-            xt = self.activation(xt)
-            xt = c2(xt)
-            x = xt + x
-        return x
-
-
-class ResBlock2(ConvNets):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-        super().__init__()
-        self.convs = nn.ModuleList(
-            [
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=dilation[0],
-                        padding=get_padding(kernel_size, dilation[0]),
-                    )
-                ),
-                weight_norm(
-                    nn.Conv1d(
-                        channels,
-                        channels,
-                        kernel_size,
-                        1,
-                        dilation=dilation[1],
-                        padding=get_padding(kernel_size, dilation[1]),
-                    )
-                ),
-            ]
-        )
-        self.activation = nn.LeakyReLU(0.1)
-        self.convs.apply(self.init_weights)
-
-    def forward(self, x):
-        for c in self.convs:
-            xt = self.activation(x)
-            xt = c(xt)
-            x = xt + x
-        return x
 
 
 class iSTFTNetGenerator(ConvNets):
@@ -241,7 +115,7 @@ class iSTFTNetGenerator(ConvNets):
             for j, (k, d) in enumerate(
                 zip(cfg.resblock_kernel_sizes, cfg.resblock_dilation_sizes)
             ):
-                self.resblocks.append(resblock(ch, k, d))
+                self.resblocks.append(resblock(ch, k, d, cfg.resblock_activation))
 
         self.post_n_fft = cfg.gen_istft_n_fft
         self.conv_post = weight_norm(
@@ -252,7 +126,7 @@ class iSTFTNetGenerator(ConvNets):
         self.activation = cfg.activation
         self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
 
-    def forward(self, x):
+    def forward(self, x: Tensor):
         x = self.conv_pre(x)
         for i in range(self.num_upsamples):
             x = self.activation(x)
@@ -283,7 +157,7 @@ class iSTFTNetGenerator(ConvNets):
         **pickle_load_args,
     ):
         try:
-
+            return super().load_weights(
                 path,
                 raise_if_not_exists,
                 strict,
@@ -292,18 +166,6 @@ class iSTFTNetGenerator(ConvNets):
                 mmap,
                 **pickle_load_args,
             )
-            if incompatible_keys:
-                self.remove_norms()
-                incompatible_keys = super().load_weights(
-                    path,
-                    raise_if_not_exists,
-                    strict,
-                    assign,
-                    weights_only,
-                    mmap,
-                    **pickle_load_args,
-                )
-            return incompatible_keys
         except RuntimeError:
             self.remove_norms()
             return super().load_weights(
@@ -322,6 +184,7 @@ class iSTFTNetGenerator(ConvNets):
         model_file: PathLike,
         model_config: Union[iSTFTNetConfig, Dict[str, Any]],
         *,
+        remove_norms: bool = False,
         strict: bool = False,
         map_location: str = "cpu",
         weights_only: bool = False,
@@ -339,11 +202,11 @@ class iSTFTNetGenerator(ConvNets):
         h = iSTFTNetConfig(**model_config)
 
         model = cls(h)
+        if remove_norms:
+            model.remove_norms()
         try:
-
-
-            model.remove_norms()
-            model.load_state_dict(model_state_dict, strict=strict)
+            model.load_state_dict(model_state_dict, strict=strict)
+            return model
         except RuntimeError:
             print(
                 f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
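
The loader hunks above make weight-norm stripping opt-in: a new `remove_norms` keyword is applied before loading, and `load_weights` now only falls back to `remove_norms()` when the first attempt raises. A rough sketch of the new flow, written against the calls visible in the diff (the wrapping classmethod's name is not shown in this hunk, so the sketch loads a state dict directly; the config and state dict are illustrative stand-ins):

```python
from lt_tensor.model_zoo.audio_models.istft import iSTFTNetConfig, iSTFTNetGenerator

# Illustrative stand-ins: a fresh config and a state dict produced from it.
cfg = iSTFTNetConfig()
model_state_dict = iSTFTNetGenerator(cfg).state_dict()

remove_norms = False  # new keyword in 0.0.1a35; strips weight norm before loading

model = iSTFTNetGenerator(cfg)
if remove_norms:
    model.remove_norms()
try:
    model.load_state_dict(model_state_dict, strict=False)
except RuntimeError:
    # Checkpoint saved without weight norm: strip the norms and retry.
    model.remove_norms()
    model.load_state_dict(model_state_dict, strict=False)
```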
@@ -351,201 +214,3 @@ class iSTFTNetGenerator(ConvNets):
             model.remove_norms()
             model.load_state_dict(model_state_dict, strict=strict)
             return model
-
-
-class DiscriminatorP(ConvNets):
-    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-        super().__init__()
-        self.period = period
-        self.activation = nn.LeakyReLU(0.1)
-        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-        self.convs = nn.ModuleList(
-            [
-                norm_f(
-                    nn.Conv2d(
-                        1,
-                        32,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(5, 1), 0),
-                    )
-                ),
-                norm_f(
-                    nn.Conv2d(
-                        32,
-                        128,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(5, 1), 0),
-                    )
-                ),
-                norm_f(
-                    nn.Conv2d(
-                        128,
-                        512,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(5, 1), 0),
-                    )
-                ),
-                norm_f(
-                    nn.Conv2d(
-                        512,
-                        1024,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(5, 1), 0),
-                    )
-                ),
-                norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
-            ]
-        )
-        self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-    def forward(self, x):
-        fmap = []
-
-        # 1d to 2d
-        b, c, t = x.shape
-        if t % self.period != 0:  # pad first
-            n_pad = self.period - (t % self.period)
-            x = F.pad(x, (0, n_pad), "reflect")
-            t = t + n_pad
-        x = x.view(b, c, t // self.period, self.period)
-
-        for l in self.convs:
-            x = l(x)
-            x = self.activation(x)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class MultiPeriodDiscriminator(ConvNets):
-    def __init__(self):
-        super().__init__()
-        self.discriminators = nn.ModuleList(
-            [
-                DiscriminatorP(2),
-                DiscriminatorP(3),
-                DiscriminatorP(5),
-                DiscriminatorP(7),
-                DiscriminatorP(11),
-            ]
-        )
-
-    def forward(self, y, y_hat):
-        y_d_rs = []
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            y_d_rs.append(y_d_r)
-            fmap_rs.append(fmap_r)
-            y_d_gs.append(y_d_g)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(ConvNets):
-    def __init__(self, use_spectral_norm=False):
-        super().__init__()
-        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-        self.convs = nn.ModuleList(
-            [
-                norm_f(nn.Conv1d(1, 128, 15, 1, padding=7)),
-                norm_f(nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)),
-                norm_f(nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)),
-                norm_f(nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)),
-                norm_f(nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
-                norm_f(nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
-                norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)),
-            ]
-        )
-        self.activation = nn.LeakyReLU(0.1)
-        self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1))
-
-    def forward(self, x):
-        fmap = []
-        for l in self.convs:
-            x = self.activation(l(x))
-
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class MultiScaleDiscriminator(ConvNets):
-    def __init__(self):
-        super().__init__()
-        self.discriminators = nn.ModuleList(
-            [
-                DiscriminatorS(use_spectral_norm=True),
-                DiscriminatorS(),
-                DiscriminatorS(),
-            ]
-        )
-        self.meanpools = nn.ModuleList(
-            [nn.AvgPool1d(4, 2, padding=2), nn.AvgPool1d(4, 2, padding=2)]
-        )
-
-    def forward(self, y, y_hat):
-        y_d_rs = []
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            if i != 0:
-                y = self.meanpools[i - 1](y)
-                y_hat = self.meanpools[i - 1](y_hat)
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            y_d_rs.append(y_d_r)
-            fmap_rs.append(fmap_r)
-            y_d_gs.append(y_d_g)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-def feature_loss(fmap_r, fmap_g):
-    loss = 0
-    for dr, dg in zip(fmap_r, fmap_g):
-        for rl, gl in zip(dr, dg):
-            loss += torch.mean(torch.abs(rl - gl))
-
-    return loss * 2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-    loss = 0
-    r_losses = []
-    g_losses = []
-    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-        r_loss = torch.mean((1 - dr) ** 2)
-        g_loss = torch.mean(dg**2)
-        loss += r_loss + g_loss
-        r_losses.append(r_loss.item())
-        g_losses.append(g_loss.item())
-
-    return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
-    loss = 0
-    gen_losses = []
-    for dg in disc_outputs:
-        l = torch.mean((1 - dg) ** 2)
-        gen_losses.append(l)
-        loss += l
-
-    return loss, gen_losses
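
This final hunk drops the HiFiGAN-style discriminators (`DiscriminatorP`, `MultiPeriodDiscriminator`, `DiscriminatorS`, `MultiScaleDiscriminator`) and the `feature_loss` / `discriminator_loss` / `generator_loss` helpers from the iSTFTNet module; judging by the file list above, discriminator and loss code is now maintained in `lt_tensor/model_zoo/losses/discriminators.py` instead (its exports are not visible in this diff). For reference, the removed helpers implement the usual least-squares GAN objectives, restated in a standalone sketch:

```python
import torch

# Standalone restatement of the removed loss helpers (LSGAN objectives):
# the discriminator pushes real outputs toward 1 and generated outputs toward 0;
# the generator pushes its outputs toward 1.
real_out = torch.rand(4, 100)  # placeholder discriminator outputs on real audio
fake_out = torch.rand(4, 100)  # placeholder outputs on generated audio

disc_loss = torch.mean((1 - real_out) ** 2) + torch.mean(fake_out**2)
gen_loss = torch.mean((1 - fake_out) ** 2)
```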
@@ -0,0 +1,248 @@
+from lt_utils.common import *
+from torch.nn import functional as F
+from lt_tensor.torch_commons import *
+from lt_tensor.model_zoo.convs import ConvNets
+from lt_tensor.model_zoo.activations import snake, alias_free
+
+
+def get_padding(kernel_size, dilation=1):
+    return int((kernel_size * dilation - dilation) / 2)
+
+
+class ResBlock1(ConvNets):
+    def __init__(
+        self,
+        channels,
+        kernel_size=3,
+        dilation=(1, 3, 5),
+        activation: nn.Module = nn.LeakyReLU(0.1),
+    ):
+        super().__init__()
+
+        self.convs1 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs1.apply(self.init_weights)
+
+        self.convs2 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                )
+                for _ in dilation
+            ]
+        )
+        self.convs2.apply(self.init_weights)
+        self.activation = activation
+
+    def forward(self, x):
+        for c1, c2 in zip(self.convs1, self.convs2):
+            xt = c1(self.activation(x))
+            x = c2(self.activation(xt)) + x
+        return x
+
+
+class ResBlock2(ConvNets):
+    def __init__(
+        self,
+        channels,
+        kernel_size=3,
+        dilation=(1, 3),
+        activation: nn.Module = nn.LeakyReLU(0.1),
+    ):
+        super().__init__()
+        self.convs = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs.apply(self.init_weights)
+        self.activation = activation
+
+    def forward(self, x):
+        for c in self.convs:
+            xt = c(self.activation(x))
+            x = xt + x
+        return x
+
+
+def get_snake(name: Literal["snake", "snakebeta"] = "snake"):
+    assert name.lower() in [
+        "snake",
+        "snakebeta",
+    ], f"'{name}' is not a valid snake activation! use 'snake' or 'snakebeta'"
+    if name.lower() == "snake":
+        return snake.Snake
+    return snake.SnakeBeta
+
+
+class AMPBlock1(ConvNets):
+    """Modified from 'https://github.com/NVIDIA/BigVGAN/blob/main/bigvgan.py' under MIT license, found in 'bigvgan/LICENSE'
+    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+    AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1
+
+    Args:
+        channels (int): Number of convolution channels.
+        kernel_size (int): Size of the convolution kernel. Default is 3.
+        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
+        snake_logscale: (bool): to use logscale with snake activation. Default to True.
+        activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Defaults to 'snakebeta'.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        kernel_size: int = 3,
+        dilation: tuple = (1, 3, 5),
+        snake_logscale: bool = True,
+        activation: Literal["snake", "snakebeta"] = "snakebeta",
+    ):
+        super().__init__()
+        actv = get_snake(activation)
+
+        self.convs1 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        stride=1,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs1.apply(self.init_weights)
+
+        self.convs2 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        stride=1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                )
+                for _ in range(len(dilation))
+            ]
+        )
+        self.convs2.apply(self.init_weights)
+
+        self.num_layers = len(self.convs1) + len(
+            self.convs2
+        )  # Total number of conv layers
+
+        # Activation functions
+        self.activations = nn.ModuleList(
+            [
+                alias_free.Activation1d(
+                    activation=actv(channels, alpha_logscale=snake_logscale)
+                )
+                for _ in range(self.num_layers)
+            ]
+        )
+
+    def forward(self, x):
+        acts1, acts2 = self.activations[::2], self.activations[1::2]
+        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
+            xt = a1(x)
+            xt = c1(xt)
+            xt = a2(xt)
+            xt = c2(xt)
+            x = xt + x
+        return x
+
+
+class AMPBlock2(ConvNets):
+    """Modified from 'https://github.com/NVIDIA/BigVGAN/blob/main/bigvgan.py' under MIT license, found in 'bigvgan/LICENSE'
+    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+    Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1
+
+    Args:
+        channels (int): Number of convolution channels.
+        kernel_size (int): Size of the convolution kernel. Default is 3.
+        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
+        snake_logscale: (bool): to use logscale with snake activation. Default to True.
+        activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Defaults to 'snakebeta'.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        kernel_size: int = 3,
+        dilation: tuple = (1, 3, 5),
+        snake_logscale: bool = True,
+        activation: Literal["snake", "snakebeta"] = "snakebeta",
+    ):
+        super().__init__()
+        actv = get_snake(activation)
+        self.convs = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        stride=1,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs.apply(self.init_weights)
+
+        self.num_layers = len(self.convs)  # Total number of conv layers
+
+        # Activation functions
+        self.activations = nn.ModuleList(
+            [
+                alias_free.Activation1d(
+                    activation=actv(channels, alpha_logscale=snake_logscale)
+                )
+                for _ in range(self.num_layers)
+            ]
+        )
+
+    def forward(self, x):
+        for c, a in zip(self.convs, self.activations):
+            xt = a(x)
+            xt = c(xt)
+            x = xt + x
+        return x
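
The new `resblocks.py` gathers the plain LeakyReLU residual blocks (`ResBlock1`, `ResBlock2`, now parameterized by an injected activation module) together with the BigVGAN-style `AMPBlock1`/`AMPBlock2`, which wrap Snake/SnakeBeta activations in anti-aliased `Activation1d` layers. A quick smoke-test sketch against the constructors shown above (tensor shapes are illustrative; both blocks are shape-preserving since every conv is padded to keep the time dimension):

```python
import torch
from torch import nn
from lt_tensor.model_zoo.audio_models.resblocks import ResBlock1, AMPBlock1

x = torch.randn(1, 64, 256)  # (batch, channels, time) - illustrative shape

# Shared residual block: the activation is now supplied by the caller
# (this is what the generator passes as cfg.resblock_activation).
res = ResBlock1(channels=64, kernel_size=3, dilation=(1, 3, 5),
                activation=nn.LeakyReLU(0.1))
assert res(x).shape == x.shape

# BigVGAN-style block: Snake/SnakeBeta selected by name, anti-aliased internally.
amp = AMPBlock1(channels=64, kernel_size=3, dilation=(1, 3, 5),
                snake_logscale=True, activation="snakebeta")
assert amp(x).shape == x.shape
```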