lt-tensor 0.0.1a14__py3-none-any.whl → 0.0.1a15__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
- lt_tensor/datasets/audio.py +23 -6
- lt_tensor/model_base.py +163 -123
- lt_tensor/model_zoo/diffwave/__init__.py +0 -0
- lt_tensor/model_zoo/diffwave/model.py +200 -0
- lt_tensor/model_zoo/diffwave/params.py +58 -0
- lt_tensor/model_zoo/discriminator.py +269 -151
- lt_tensor/model_zoo/features.py +102 -11
- lt_tensor/model_zoo/istft/generator.py +6 -2
- lt_tensor/model_zoo/istft/trainer.py +16 -7
- lt_tensor/model_zoo/residual.py +133 -64
- {lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/METADATA +1 -1
- {lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/RECORD +15 -12
- {lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/WHEEL +0 -0
- {lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/licenses/LICENSE +0 -0
- {lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/top_level.txt +0 -0
lt_tensor/model_zoo/istft/generator.py CHANGED

@@ -1,7 +1,7 @@
 __all__ = ["iSTFTGenerator"]
 from lt_utils.common import *
 from lt_tensor.torch_commons import *
-from lt_tensor.model_zoo.residual import ConvNets,
+from lt_tensor.model_zoo.residual import ConvNets, ResBlocks1D, ResBlock1D, ResBlock1D2
 
 
 class iSTFTGenerator(ConvNets):
@@ -20,6 +20,7 @@ class iSTFTGenerator(ConvNets):
         n_fft: int = 16,
         activation: nn.Module = nn.LeakyReLU(0.1),
         hop_length: int = 256,
+        residual_cls: Union[ResBlock1D, ResBlock1D2] = ResBlock1D
     ):
         super().__init__()
         self.num_kernels = len(resblock_kernel_sizes)
@@ -37,6 +38,7 @@ class iSTFTGenerator(ConvNets):
                 upsample_initial_channel,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
+                residual_cls
             )
         )
 
@@ -52,6 +54,7 @@ class iSTFTGenerator(ConvNets):
         upsample_initial_channel: int,
         resblock_kernel_sizes: List[Union[int, List[int]]],
         resblock_dilation_sizes: List[int | List[int]],
+        residual: nn.Module
     ):
         i, k, u = state
         channels = upsample_initial_channel // (2 ** (i + 1))
@@ -69,11 +72,12 @@ class iSTFTGenerator(ConvNets):
                     )
                 ).apply(self.init_weights),
             ),
-            residual=
+            residual=ResBlocks1D(
                 channels,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 self.activation,
+                residual
            ),
         )
     )
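The generator change above just threads the new `residual_cls` argument into the `ResBlocks1D` stack it builds for each upsampling stage. Below is a minimal standalone sketch of that call, using only the `ResBlocks1D` signature visible in this diff; the channel count and tensor shape are illustrative, not values from the package.

```python
import torch
from torch import nn

from lt_tensor.model_zoo.residual import ResBlocks1D, ResBlock1D

# ResBlocks1D builds one residual block per kernel size and averages their
# outputs; `block` selects the residual block class (the generator now passes
# its new `residual_cls` argument here, ResBlock1D or ResBlock1D2).
stack = ResBlocks1D(
    channels=256,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    activation=nn.LeakyReLU(0.1),
    block=ResBlock1D,
)

x = torch.randn(2, 256, 400)  # (batch, channels, frames) -- illustrative shape
y = stack(x)                  # same shape as the input
```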
lt_tensor/model_zoo/istft/trainer.py CHANGED

@@ -15,6 +15,7 @@ from lt_tensor.model_zoo.discriminator import (
     MultiPeriodDiscriminator,
     MultiScaleDiscriminator,
 )
+from lt_tensor.model_zoo.residual import ResBlock1D2, ResBlock1D
 
 
 def feature_loss(fmap_r, fmap_g):
@@ -66,6 +67,7 @@ class AudioSettings(ModelConfig):
         scheduler_template: Callable[
             [optim.Optimizer], optim.lr_scheduler.LRScheduler
         ] = lambda optimizer: optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.998),
+        residual_cls: Union[ResBlock1D, ResBlock1D2] = ResBlock1D,
     ):
         self.in_channels = n_mels
         self.upsample_rates = upsample_rates
@@ -81,6 +83,7 @@ class AudioSettings(ModelConfig):
         self.lr = lr
         self.adamw_betas = adamw_betas
         self.scheduler_template = scheduler_template
+        self.residual_cls = residual_cls
 
 
 class AudioDecoderTrainer(Model):
@@ -151,14 +154,20 @@ class AudioDecoderTrainer(Model):
         return True
 
     def update_schedulers_and_optimizer(self):
+        gc.collect()
+        self.g_optim = None
+        self.g_scheduler = None
+        gc.collect()
         self.g_optim = optim.AdamW(
             self.generator.parameters(),
             lr=self.settings.lr,
             betas=self.settings.adamw_betas,
         )
+        gc.collect()
         self.g_scheduler = self.settings.scheduler_template(self.g_optim)
         if any([self.mpd is None, self.msd is None]):
             return
+        gc.collect()
         self.d_optim = optim.AdamW(
             itertools.chain(self.mpd.parameters(), self.msd.parameters()),
             lr=self.settings.lr,
@@ -250,9 +259,9 @@
             win_length=self.settings.n_fft,
         )
         if not return_dict:
-            return wave
+            return wave
         return {
-            "wave": wave
+            "wave": wave,
             "spec": spec,
             "phase": phase,
         }
@@ -286,8 +295,8 @@
             self.settings.n_fft,
             hop_length=4,
             win_length=self.settings.n_fft,
-
-        )
+            length=real_audio.shape[-1],
+        )
 
         disc_kwargs = dict(
             real_audio=real_audio,
@@ -373,7 +382,7 @@
         loss_fm_mpd = feature_loss(real_mpd_feats, fake_mpd_feats)
         loss_fm_msd = feature_loss(real_msd_feats, fake_msd_feats)
 
-        loss_stft = self.audio_processor.stft_loss(fake_audio, real_audio) * stft_scale
+        # loss_stft = self.audio_processor.stft_loss(fake_audio, real_audio) * stft_scale
         loss_mel = (
             F.huber_loss(self.audio_processor.compute_mel(fake_audio), mels) * mel_scale
         )
@@ -381,7 +390,7 @@
 
         loss_adv = (loss_adv_mpd + loss_adv_msd) * adv_scale
 
-        loss_g = loss_adv + loss_fm +
+        loss_g = loss_adv + loss_fm + loss_mel  # + loss_stft
         if not am_i_frozen:
             loss_g.backward()
             self.g_optim.step()
@@ -392,7 +401,7 @@
             "loss_d": loss_d,
             "loss_adv": loss_adv.item(),
             "loss_fm": loss_fm.item(),
-            "loss_stft": loss_stft.item(),
+            "loss_stft": 1.0,  # loss_stft.item(),
             "loss_mel": loss_mel.item(),
             "lr_g": lr_g,
             "lr_d": lr_d,
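The `update_schedulers_and_optimizer` hunk above drops the old generator optimizer and scheduler references and calls `gc.collect()` before allocating replacements. A minimal sketch of that rebuild pattern in plain PyTorch follows; the stand-in model, learning rates, and betas are placeholders, not values taken from the trainer.

```python
import gc

from torch import nn, optim

model = nn.Linear(16, 16)  # stand-in for the generator

# Initial optimizer/scheduler, mirroring the AdamW + ExponentialLR defaults
# shown in AudioSettings above.
g_optim = optim.AdamW(model.parameters(), lr=1e-4, betas=(0.8, 0.99))
g_scheduler = optim.lr_scheduler.ExponentialLR(g_optim, gamma=0.998)

# Rebuild: release the old objects first so their state can be collected,
# then create fresh ones (this is what the added gc.collect() calls do).
g_optim, g_scheduler = None, None
gc.collect()
g_optim = optim.AdamW(model.parameters(), lr=5e-5, betas=(0.8, 0.99))
g_scheduler = optim.lr_scheduler.ExponentialLR(g_optim, gamma=0.998)
```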
lt_tensor/model_zoo/residual.py CHANGED

@@ -5,7 +5,9 @@ __all__ = [
     "ResBlock2D",
     "ResBlock1DShuffled",
     "AdaResBlock1D",
-    "
+    "ResBlocks1D",
+    "ResBlock1D2",
+    "ShuffleBlock2D",
 ]
 import math
 from lt_utils.common import *
@@ -45,38 +47,8 @@ class ConvNets(Model):
             m.weight.data.normal_(mean, std)
 
 
-
-
-        self,
-        channels: int,
-        resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11],
-        resblock_dilation_sizes: List[Union[int, List[int]]] = [
-            [1, 3, 5],
-            [1, 3, 5],
-            [1, 3, 5],
-        ],
-        activation: nn.Module = nn.LeakyReLU(0.1),
-    ):
-        super().__init__()
-        self.num_kernels = len(resblock_kernel_sizes)
-        self.rb = nn.ModuleList()
-        self.activation = activation
-
-        for k, j in zip(resblock_kernel_sizes, resblock_dilation_sizes):
-            self.rb.append(ResBlock1D(channels, k, j, activation))
-
-        self.rb.apply(self.init_weights)
-
-    def forward(self, x: torch.Tensor):
-        xs = None
-        for i, block in enumerate(self.rb):
-            if i == 0:
-                xs = block(x)
-            else:
-                xs += block(x)
-        x = xs / self.num_kernels
-        return x
-
+def get_padding(ks, d):
+    return int((ks * d - d) / 2)
 
 
 class ResBlock1D(ConvNets):
@@ -92,14 +64,13 @@ class ResBlock1D(ConvNets):
         self.conv_nets = nn.ModuleList(
             [
                 self._get_conv_layer(i, channels, kernel_size, 1, dilation, activation)
-                for i in range(
+                for i in range(len(dilation))
             ]
         )
         self.conv_nets.apply(self.init_weights)
         self.last_index = len(self.conv_nets) - 1
 
     def _get_conv_layer(self, id, ch, k, stride, d, actv):
-        get_padding = lambda ks, d: int((ks * d - d) / 2)
         return nn.Sequential(
             actv,  # 1
             weight_norm(
@@ -126,16 +97,11 @@ class ResBlock1DShuffled(ConvNets):
         kernel_size=3,
         dilation=(1, 3, 5),
         activation: nn.Module = nn.LeakyReLU(0.1),
-        add_channel_shuffle: bool = False,  # requires pytorch 2.7.0 +
         channel_shuffle_groups=1,
     ):
         super().__init__()
 
-        self.channel_shuffle = (
-            nn.ChannelShuffle(channel_shuffle_groups)
-            if add_channel_shuffle
-            else nn.Identity()
-        )
+        self.channel_shuffle = nn.ChannelShuffle(channel_shuffle_groups)
 
         self.conv_nets = nn.ModuleList(
             [
@@ -171,29 +137,67 @@ class ResBlock1DShuffled(ConvNets):
 class ResBlock2D(Model):
     def __init__(
         self,
-        in_channels,
-        out_channels,
-
+        in_channels: int,
+        out_channels: Optional[int] = None,
+        hidden_dim: int = 32,
+        downscale: bool = False,
+        activation: nn.Module = nn.LeakyReLU(0.2),
     ):
         super().__init__()
-        stride = 2 if
+        stride = 2 if downscale else 1
+        if out_channels is None:
+            out_channels = in_channels
 
         self.block = nn.Sequential(
-            nn.Conv2d(in_channels,
-
-            nn.Conv2d(
+            nn.Conv2d(in_channels, hidden_dim, 3, stride, 1),
+            activation,
+            nn.Conv2d(hidden_dim, hidden_dim, 7, 1, 3),
+            activation,
+            nn.Conv2d(hidden_dim, out_channels, 3, 1, 1),
         )
 
         self.skip = nn.Identity()
-        if
+        if downscale or in_channels != out_channels:
             self.skip = spectral_norm_select(
                 nn.Conv2d(in_channels, out_channels, 1, stride)
             )
-        # on less to be handled every
+        # on less to be handled every cycle
         self.sqrt_2 = math.sqrt(2)
 
     def forward(self, x: Tensor):
-        return (self.block(x) + self.skip(x)) / self.sqrt_2
+        return x + ((self.block(x) + self.skip(x)) / self.sqrt_2)
+
+
+class ShuffleBlock2D(ConvNets):
+    def __init__(
+        self,
+        channels: int,
+        out_channels: Optional[int] = None,
+        hidden_dim: int = 32,
+        downscale: bool = False,
+        activation: nn.Module = nn.LeakyReLU(0.1),
+    ):
+        super().__init__()
+        if out_channels is None:
+            out_channels = channels
+        self.shuffle = nn.ChannelShuffle(groups=2)
+        self.ch_split = lambda tensor: torch.split(tensor, 1, dim=1)
+        self.activation = activation
+        self.resblock_2d = ResBlock2D(
+            channels, out_channels, hidden_dim, downscale, activation
+        )
+
+    def shuffle_channels(self, tensor: torch.Tensor):
+        with torch.no_grad():
+            x = F.channel_shuffle(tensor.transpose(1, -1), tensor.shape[1]).transpose(
+                -1, 1
+            )
+        return self.ch_split(x)
+
+    def forward(self, x: torch.Tensor):
+        ch1, ch2 = self.shuffle_channels(x)
+        ch2 = self.resblock_2d(ch2)
+        return torch.cat((ch1, ch2), dim=1)
 
 
 class AdaResBlock1D(ConvNets):
@@ -207,46 +211,111 @@ class AdaResBlock1D(ConvNets):
     ):
         super().__init__()
 
+        self.alpha1 = nn.ModuleList()
+        self.alpha2 = nn.ModuleList()
         self.conv_nets = nn.ModuleList(
             [
                 self._get_conv_layer(
-
+                    d,
                     res_block_channels,
                     ada_channel_in,
                     kernel_size,
-                    1,
-                    dilation,
                 )
-                for
+                for d in dilation
             ]
         )
         self.conv_nets.apply(self.init_weights)
         self.last_index = len(self.conv_nets) - 1
         self.activation = activation
 
-    def _get_conv_layer(self,
-
+    def _get_conv_layer(self, d, ch, ada_ch, k):
+        self.alpha1.append(nn.Parameter(torch.ones(1, ada_ch, 1)))
+        self.alpha2.append(nn.Parameter(torch.ones(1, ada_ch, 1)))
         return nn.ModuleDict(
             dict(
                 norm1=AdaFusion1D(ada_ch, ch),
                 norm2=AdaFusion1D(ada_ch, ch),
-                alpha1=nn.Parameter(torch.ones(1, ada_ch, 1)),
-                alpha2=nn.Parameter(torch.ones(1, ada_ch, 1)),
                 conv1=weight_norm(
                     nn.Conv1d(
-                        ch, ch, k,
+                        ch, ch, k, 1, dilation=d, padding=get_padding(k, d)
                     )
                 ),  # 2
                 conv2=weight_norm(
-                    nn.Conv1d(ch, ch, k,
+                    nn.Conv1d(ch, ch, k, 1, dilation=1, padding=get_padding(k, 1))
                 ),  # 4
             )
         )
 
     def forward(self, x: torch.Tensor, y: torch.Tensor):
-        for cnn in self.conv_nets:
-            xt = self.activation(cnn["norm1"](x, y,
+        for i, cnn in enumerate(self.conv_nets):
+            xt = self.activation(cnn["norm1"](x, y, self.alpha1[i]))
             xt = cnn["conv1"](xt)
-            xt = self.activation(cnn["norm2"](xt, y,
+            xt = self.activation(cnn["norm2"](xt, y, self.alpha2[i]))
             x = cnn["conv2"](xt) + x
         return x
+
+
+class ResBlock1D2(ConvNets):
+    def __init__(
+        self,
+        channels,
+        kernel_size=3,
+        dilation=(1, 3, 5),
+        activation: nn.Module = nn.LeakyReLU(0.1),
+    ):
+        super().__init__()
+        self.convs = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in range(dilation)
+            ]
+        )
+        self.convs.apply(self.init_weights)
+        self.activation = activation
+
+    def forward(self, x):
+        for c in self.convs:
+            xt = c(self.activation(x))
+            x = xt + x
+        return x
+
+
+class ResBlocks1D(ConvNets):
+    def __init__(
+        self,
+        channels: int,
+        resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11],
+        resblock_dilation_sizes: List[Union[int, List[int]]] = [
+            [1, 3, 5],
+            [1, 3, 5],
+            [1, 3, 5],
+        ],
+        activation: nn.Module = nn.LeakyReLU(0.1),
+        block: Union[ResBlock1D, ResBlock1D2] = ResBlock1D,
+    ):
+        super().__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.rb = nn.ModuleList()
+        self.activation = activation
+
+        for k, j in zip(resblock_kernel_sizes, resblock_dilation_sizes):
+            self.rb.append(block(channels, k, j, activation))
+
+        self.rb.apply(self.init_weights)
+
+    def forward(self, x: torch.Tensor):
+        xs = None
+        for i, block in enumerate(self.rb):
+            if i == 0:
+                xs = block(x)
+            else:
+                xs += block(x)
+        return xs / self.num_kernels
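ResBlock2D gained an optional `out_channels`, a `hidden_dim`, and a `downscale` flag, and its forward now adds the input back on top of the scaled block/skip sum. A small usage sketch with the defaults from the signature above follows; the channel count and input shape are illustrative, not values from the package.

```python
import torch
from torch import nn

from lt_tensor.model_zoo.residual import ResBlock2D

# With the defaults, out_channels falls back to in_channels, stride stays 1,
# and the skip path is an identity, so the output keeps the input shape:
#   x + (block(x) + skip(x)) / sqrt(2)
block = ResBlock2D(in_channels=32, activation=nn.LeakyReLU(0.2))

x = torch.randn(1, 32, 64, 64)  # (batch, channels, height, width) -- illustrative
y = block(x)
assert y.shape == x.shape
```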
{lt_tensor-0.0.1a14.dist-info → lt_tensor-0.0.1a15.dist-info}/RECORD CHANGED

@@ -4,29 +4,32 @@ lt_tensor/losses.py,sha256=zvkCOnE5XpF3v6ymivRIdqPTsMM5zc94ZMom7YDi3zM,4946
 lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
 lt_tensor/math_ops.py,sha256=TkD4WQG42KsQ9Fg7FXOjf8f-ixtW0apf2XjaooecVx4,2257
 lt_tensor/misc_utils.py,sha256=S57M5XuGsIuaOKnEGZJsY3B2dTmggpdhsqQr51CQsYo,28754
-lt_tensor/model_base.py,sha256=
+lt_tensor/model_base.py,sha256=qqqIVpYz6nv01MnZuuAj1dxq4_NN-zSivP1GaegA9TI,21597
 lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
 lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
 lt_tensor/torch_commons.py,sha256=fntsEU8lhBQo0ebonI1iXBkMbWMN3HpBsG13EWlP5s8,718
 lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
 lt_tensor/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lt_tensor/datasets/audio.py,sha256=
+lt_tensor/datasets/audio.py,sha256=5Wvz1BJ7xXkLYpVLLw9RY3X3RgMdPPeGiN0-MmJDQy0,8045
 lt_tensor/model_zoo/__init__.py,sha256=RzG7fltZLyiIU_Za4pgfBPli5uPITiJkq4sTCd4uA_0,319
 lt_tensor/model_zoo/basic.py,sha256=_26H_jJk5Ld3DZiNpIhGosGfMxoFDZrI8bpDAYUOYno,10660
-lt_tensor/model_zoo/discriminator.py,sha256=
-lt_tensor/model_zoo/features.py,sha256=
+lt_tensor/model_zoo/discriminator.py,sha256=_HrgseU3KO_6ONNjISxkp6-9pRseVZr43x8NYxIq1Xg,9989
+lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
 lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
 lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
-lt_tensor/model_zoo/residual.py,sha256=
+lt_tensor/model_zoo/residual.py,sha256=i5V4ju7DB3WesKBVm6KH_LyPoKGDUOyo2Usfs-PyP58,9394
 lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
+lt_tensor/model_zoo/diffwave/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lt_tensor/model_zoo/diffwave/model.py,sha256=RwrJd7ZZ2uQdLid_m8-wbwEJ7l2gqukq2MSjbquN_Pg,6832
+lt_tensor/model_zoo/diffwave/params.py,sha256=91aaBWNfWU-q3POS3TbNgdmhw5RAayoLudVNblM8ixU,1719
 lt_tensor/model_zoo/istft/__init__.py,sha256=SV96w9WUWfHMee8Vjgn2MP0igKft7_mLTju9rFVYGHY,102
-lt_tensor/model_zoo/istft/generator.py,sha256=
+lt_tensor/model_zoo/istft/generator.py,sha256=R5Wym4Bocx1T5ijyETQe1thx4uY9ulMwcHqgsGG3h-0,3364
 lt_tensor/model_zoo/istft/sg.py,sha256=EaEi3otw_uY5QfqDBNIWBWTJSg3KnwzzR4FBr0u09C0,4838
-lt_tensor/model_zoo/istft/trainer.py,sha256=
+lt_tensor/model_zoo/istft/trainer.py,sha256=WAoySxxuyJtMDt2q0kGbaJT19vAduHyxYwBo4TTU_LM,21302
 lt_tensor/processors/__init__.py,sha256=4b9MxAJolXiJfSm20ZEspQTDm1tgLazwlPWA_jB1yLM,63
 lt_tensor/processors/audio.py,sha256=SMqNSl4Den-x1awTCQ8-TcR-0jPiv5lDaUpU93SRRaw,14749
-lt_tensor-0.0.
-lt_tensor-0.0.
-lt_tensor-0.0.
-lt_tensor-0.0.
-lt_tensor-0.0.
+lt_tensor-0.0.1a15.dist-info/licenses/LICENSE,sha256=HUnu_iSPpnDfZS_PINhO3AoVizJD1A2vee8WX7D7uXo,11358
+lt_tensor-0.0.1a15.dist-info/METADATA,sha256=RKfh13pzXJQtBwVMoXyqizQfshD7gFyC1491UCfSFP8,1033
+lt_tensor-0.0.1a15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lt_tensor-0.0.1a15.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+lt_tensor-0.0.1a15.dist-info/RECORD,,
File without changes
File without changes
File without changes