lt-tensor 0.0.1a22__py3-none-any.whl → 0.0.1a27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,20 +1,21 @@
  lt_tensor/__init__.py,sha256=8FTxpJ6td2bMr_GqzW2tCV6Tr5CelbQle8N5JRWtx8M,439
- lt_tensor/config_templates.py,sha256=RP7EFVRj6mRUj6xDLe7FMXgN5TIo8_o9h1Kb8epdmfo,2825
+ lt_tensor/config_templates.py,sha256=QVix17g5usoSxNPaQiwuvzpWahc6YYjFwqNHzISq3dA,2857
  lt_tensor/losses.py,sha256=zvkCOnE5XpF3v6ymivRIdqPTsMM5zc94ZMom7YDi3zM,4946
  lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
  lt_tensor/math_ops.py,sha256=TkD4WQG42KsQ9Fg7FXOjf8f-ixtW0apf2XjaooecVx4,2257
- lt_tensor/misc_utils.py,sha256=S57M5XuGsIuaOKnEGZJsY3B2dTmggpdhsqQr51CQsYo,28754
- lt_tensor/model_base.py,sha256=GvmQdt97ZSfOObBpBIq7UUTwpIE1g-aBm23za36YA0M,18431
+ lt_tensor/misc_utils.py,sha256=N2r3UmxC4RM2BZBQhpjDZ_BKLrzsyIlKzopTzJbnjFU,28962
+ lt_tensor/model_base.py,sha256=DTg44N6eTXLmpIAj_ac29-M5dI_iY_sC0yA_K3E13GI,17446
  lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
  lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
  lt_tensor/torch_commons.py,sha256=8l0bxmrAzwvyqjivCIVISXlbvKarlg4DdE0BOGSnMuQ,812
  lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
  lt_tensor/model_zoo/__init__.py,sha256=ltVTvmOlbOCfDc5Trvg0-Ta_Ujgkw0UVF9V5rqHx-RI,378
  lt_tensor/model_zoo/basic.py,sha256=pI8HyiHK-cmWcEEaVY_EduUJOjZW6HOtXvJd8Rbhq30,15452
+ lt_tensor/model_zoo/convs.py,sha256=YQRxek75Qpsha8nfc7wLhmJS9XxPeCa4WxuftLg6IcE,3927
  lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
  lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
  lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
- lt_tensor/model_zoo/residual.py,sha256=hNI22Tl-NWSOawBrRJ921wHvQjoYiny10Ikp2F5LzJM,10632
+ lt_tensor/model_zoo/residual.py,sha256=tMXgif9Ggep9bk75K93yueeU5vk5S25AGCRFwOQOyB8,6452
  lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
  lt_tensor/model_zoo/activations/alias_free_torch/__init__.py,sha256=ovguP4wzQEDNguczwiZnhMm4dRRVcvnzmHrfQtlRCNQ,15
  lt_tensor/model_zoo/activations/alias_free_torch/act.py,sha256=h79C93GzbSrCq4ui6iO7DjJLuJ7QK_ag_TU-WAcj0NI,1405
@@ -22,15 +23,13 @@ lt_tensor/model_zoo/activations/alias_free_torch/filter.py,sha256=5TvXESv31toD5s
  lt_tensor/model_zoo/activations/alias_free_torch/resample.py,sha256=3iM4fNr9fLNXXMyXvzW-MwkSjOZOrMZLfS80UHs6zk0,3386
  lt_tensor/model_zoo/activations/snake/__init__.py,sha256=Adb_xe-7YdYsNxvlSSO9zkae-cu7ElxkBKE3trDtOus,4517
  lt_tensor/model_zoo/audio_models/__init__.py,sha256=MoG9YjxLyvscq_6njK1ljGBletK9iedBXt66bplzW-s,83
- lt_tensor/model_zoo/audio_models/bigvgan/__init__.py,sha256=OlQ1SJl0rz_ne1GC32WJ8vXdxLMsJcuBo45EMDyI1tM,19348
- lt_tensor/model_zoo/audio_models/bigvgan/cuda/__init__.py,sha256=tGyPDiblFrzrF9PJccguJtx1WO-D0HZCT92QyblyLD8,4934
- lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=OUyh421xRCcxOMi_Ek6Ak3-FPe1k6WTDQ-6gd6OjaCU,8091
- lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=WSeZ8PpbWSoG6mYd5zLb6403qasn8C_J1jPaqv3dt3s,18247
- lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=ZLI6nJ98JJFl4iqUz54M1KU4IGTanSQDjWRKEm0cmTM,19294
- lt_tensor/processors/__init__.py,sha256=4b9MxAJolXiJfSm20ZEspQTDm1tgLazwlPWA_jB1yLM,63
- lt_tensor/processors/audio.py,sha256=wZYTjAKZlHGfiHpxvhCq_JAiTx70a34FQv5z1n9e7mM,14791
- lt_tensor-0.0.1a22.dist-info/licenses/LICENSE,sha256=tQHc38scHOba4kDBNG4U0U6PpObaloiZG-FvKSgv2b0,11336
- lt_tensor-0.0.1a22.dist-info/METADATA,sha256=H7AnERefauaZBVg3rGaC9i0AdWmuOjOQ_SlFenicHg0,1062
- lt_tensor-0.0.1a22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lt_tensor-0.0.1a22.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
- lt_tensor-0.0.1a22.dist-info/RECORD,,
+ lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=PDuDYN1omD1RoAXcmxH3tEgfAuM3ZHAWzimD6ElMqEQ,9073
+ lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=7GJqKLw7-juXpfp5IFzjASLut0uouDhjZ1CQknf3H68,16533
+ lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=ltIuD9t1gmS3bTmCqZIwJHKrhC6DYya3OaXlskWX9kw,17606
+ lt_tensor/processors/__init__.py,sha256=Pvxhh0KR65zLCgUd53_k5Z0y5JWWcO0ZBXFK9rv0o5w,109
+ lt_tensor/processors/audio.py,sha256=mZY7LOeYACnX8PLz5AeFe0zqEebPoN-Q44Bi3yrlZMQ,16881
+ lt_tensor-0.0.1a27.dist-info/licenses/LICENSE,sha256=tQHc38scHOba4kDBNG4U0U6PpObaloiZG-FvKSgv2b0,11336
+ lt_tensor-0.0.1a27.dist-info/METADATA,sha256=NpXqioPXZMvXo-HzhXrS6O1qiftDnoc8ZzOfhfUMBaY,1062
+ lt_tensor-0.0.1a27.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lt_tensor-0.0.1a27.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+ lt_tensor-0.0.1a27.dist-info/RECORD,,
@@ -1,536 +0,0 @@
- __all__ = ["BigVGANConfig", "BigVGAN"]
-
- import numpy as np
- from lt_tensor.torch_commons import *
- from torch.nn import functional as F
- from lt_tensor.config_templates import ModelConfig
- from lt_tensor.torch_commons import *
- from lt_tensor.model_base import Model
- from math import sqrt
- from lt_utils.common import *
- from lt_utils.file_ops import load_json, is_file, is_dir, is_path_valid
- from lt_tensor.model_zoo.residual import ConvNets
- from lt_tensor.model_zoo.activations.snake import snake as activations
- from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
- from lt_tensor.model_zoo.activations.alias_free_torch.act import (
-     Activation1d as TorchActivation1d,
- )
-
-
- MAX_WAV_VALUE = 32767.0  # NOTE: 32768.0 -1 to prevent int16 overflow (results in popping sound in corner cases)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size * dilation - dilation) / 2)
-
-
- class BigVGANConfig(ModelConfig):
-
-     def __init__(
-         self,
-         resblock: int = 0,
-         num_gpus: int = 0,
-         batch_size: int = 32,
-         learning_rate: float = 0.0001,
-         adam_b1: float = 0.8,
-         adam_b2: float = 0.99,
-         lr_decay: float = 0.999,
-         seed: int = 1234,
-         upsample_rates: list[int] = [8, 8, 2, 2],
-         upsample_kernel_sizes: list[int] = [16, 16, 4, 4],
-         upsample_initial_channel: int = 512,
-         resblock_kernel_sizes: list[int] = [3, 7, 11],
-         resblock_dilation_sizes: list[list[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-         activation: str = "snakebeta",
-         snake_logscale: bool = True,
-         discriminator: str = "mrd",
-         resolutions: list[list[int]] = [
-             [1024, 120, 600],
-             [2048, 240, 1200],
-             [512, 50, 240],
-         ],
-         mpd_reshapes: list[int] = [2, 3, 5, 7, 11],
-         use_spectral_norm: bool = False,
-         discriminator_channel_mult: int = 1,
-         segment_size: int = 8192,
-         num_mels: int = 80,
-         num_freq: int = 1025,
-         n_fft: int = 1024,
-         hop_size: int = 256,
-         win_size: int = 1024,
-         sampling_rate: int = 22050,
-         use_cuda_kernel: bool = False,
-     ):
-         self.resblock = resblock
-         self.num_gpus = num_gpus
-         self.batch_size = batch_size
-         self.learning_rate = learning_rate
-         self.adam_b1 = adam_b1
-         self.adam_b2 = adam_b2
-         self.lr_decay = lr_decay
-         self.seed = seed
-         self.upsample_rates = upsample_rates
-         self.upsample_kernel_sizes = upsample_kernel_sizes
-         self.upsample_initial_channel = upsample_initial_channel
-         self.resblock_kernel_sizes = resblock_kernel_sizes
-         self.resblock_dilation_sizes = resblock_dilation_sizes
-         self.activation = activation
-         self.snake_logscale = snake_logscale
-         self.discriminator = discriminator
-         self.resolutions = resolutions
-         self.mpd_reshapes = mpd_reshapes
-         self.use_spectral_norm = use_spectral_norm
-         self.discriminator_channel_mult = discriminator_channel_mult
-         self.segment_size = segment_size
-         self.num_mels = num_mels
-         self.num_freq = num_freq
-         self.n_fft = n_fft
-         self.hop_size = hop_size
-         self.win_size = win_size
-         self.sampling_rate = sampling_rate
-         self.use_cuda_kernel = use_cuda_kernel
-
-
- class AMPBlock1(ConvNets):
-     """
-     AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
-     AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1
-     Args:
-         h (BigVGANConfig): Hyperparameters.
-         channels (int): Number of convolution channels.
-         kernel_size (int): Size of the convolution kernel. Default is 3.
-         dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
-         activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
-     """
-
-     def __init__(
-         self,
-         h: BigVGANConfig,
-         channels: int,
-         kernel_size: int = 3,
-         dilation: tuple = (1, 3, 5),
-         activation: str = None,
-     ):
-         super().__init__()
-
-         self.h = h
-
-         self.convs1 = nn.ModuleList(
-             [
-                 weight_norm(
-                     nn.Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         stride=1,
-                         dilation=d,
-                         padding=get_padding(kernel_size, d),
-                     )
-                 )
-                 for d in dilation
-             ]
-         )
-         self.convs1.apply(self.init_weights)
-
-         self.convs2 = nn.ModuleList(
-             [
-                 weight_norm(
-                     nn.Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         stride=1,
-                         dilation=1,
-                         padding=get_padding(kernel_size, 1),
-                     )
-                 )
-                 for _ in range(len(dilation))
-             ]
-         )
-         self.convs2.apply(self.init_weights)
-
-         self.num_layers = len(self.convs1) + len(
-             self.convs2
-         )  # Total number of conv layers
-
-         # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
-         if self.h.use_cuda_kernel:
-             from .cuda import (
-                 Activation1d as CudaActivation1d,
-             )
-
-             Activation1d = CudaActivation1d
-         else:
-             Activation1d = TorchActivation1d
-
-         # Activation functions
-         if activation == "snake":
-             self.activations = nn.ModuleList(
-                 [
-                     Activation1d(
-                         activation=activations.Snake(
-                             channels, alpha_logscale=h.snake_logscale
-                         )
-                     )
-                     for _ in range(self.num_layers)
-                 ]
-             )
-         elif activation == "snakebeta":
-             self.activations = nn.ModuleList(
-                 [
-                     Activation1d(
-                         activation=activations.SnakeBeta(
-                             channels, alpha_logscale=h.snake_logscale
-                         )
-                     )
-                     for _ in range(self.num_layers)
-                 ]
-             )
-         else:
-             raise NotImplementedError(
-                 "activation incorrectly specified. check the config file and look for 'activation'."
-             )
-
-     def forward(self, x):
-         acts1, acts2 = self.activations[::2], self.activations[1::2]
-         for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
-             xt = a1(x)
-             xt = c1(xt)
-             xt = a2(xt)
-             xt = c2(xt)
-             x = xt + x
-
-         return x
-
-     def remove_weight_norm(self):
-         for l in self.convs1:
-             remove_weight_norm(l)
-         for l in self.convs2:
-             remove_weight_norm(l)
-
-
- class AMPBlock2(ConvNets):
-     """
-     AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
-     Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1
-     Args:
-         h (AttrDict): Hyperparameters.
-         channels (int): Number of convolution channels.
-         kernel_size (int): Size of the convolution kernel. Default is 3.
-         dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
-         activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
-     """
-
-     def __init__(
-         self,
-         h: BigVGANConfig,
-         channels: int,
-         kernel_size: int = 3,
-         dilation: tuple = (1, 3, 5),
-         activation: str = None,
-     ):
-         super().__init__()
-
-         self.h = h
-
-         self.convs = nn.ModuleList(
-             [
-                 weight_norm(
-                     nn.Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         stride=1,
-                         dilation=d,
-                         padding=get_padding(kernel_size, d),
-                     )
-                 )
-                 for d in dilation
-             ]
-         )
-         self.convs.apply(self.init_weights)
-
-         self.num_layers = len(self.convs)  # Total number of conv layers
-
-         # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
-         if self.h.use_cuda_kernel:
-             from .cuda import (
-                 Activation1d as CudaActivation1d,
-             )
-
-             Activation1d = CudaActivation1d
-         else:
-             Activation1d = TorchActivation1d
-
-         # Activation functions
-         if activation == "snake":
-             self.activations = nn.ModuleList(
-                 [
-                     Activation1d(
-                         activation=activations.Snake(
-                             channels, alpha_logscale=h.snake_logscale
-                         )
-                     )
-                     for _ in range(self.num_layers)
-                 ]
-             )
-         elif activation == "snakebeta":
-             self.activations = nn.ModuleList(
-                 [
-                     Activation1d(
-                         activation=activations.SnakeBeta(
-                             channels, alpha_logscale=h.snake_logscale
-                         )
-                     )
-                     for _ in range(self.num_layers)
-                 ]
-             )
-         else:
-             raise NotImplementedError(
-                 "activation incorrectly specified. check the config file and look for 'activation'."
-             )
-
-     def forward(self, x):
-         for c, a in zip(self.convs, self.activations):
-             xt = a(x)
-             xt = c(xt)
-             x = xt + x
-
-     def remove_weight_norm(self):
-         for l in self.convs:
-             remove_weight_norm(l)
-
-
- class BigVGAN(ConvNets):
-     """
-     BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
-     New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.
-     Args:
-         h (AttrDict): Hyperparameters.
-         use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.
-     Note:
-         - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
-         - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
-     """
-
-     def __init__(self, h: BigVGANConfig):
-         super().__init__()
-         self.h = h
-
-         # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
-         if self.h.use_cuda_kernel:
-             from .cuda import (
-                 Activation1d as CudaActivation1d,
-             )
-
-             Activation1d = CudaActivation1d
-         else:
-             Activation1d = TorchActivation1d
-
-         self.num_kernels = len(h.resblock_kernel_sizes)
-         self.num_upsamples = len(h.upsample_rates)
-
-         # Pre-conv
-         self.conv_pre = weight_norm(
-             nn.Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)
-         )
-
-         # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
-         if h.resblock == 0:
-             resblock_class = AMPBlock1
-         elif h.resblock == 1:
-             resblock_class = AMPBlock2
-         else:
-             raise ValueError(
-                 f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}"
-             )
-
-         # Transposed conv-based upsamplers. does not apply anti-aliasing
-         self.ups = nn.ModuleList()
-         for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
-             self.ups.append(
-                 nn.ModuleList(
-                     [
-                         weight_norm(
-                             nn.ConvTranspose1d(
-                                 h.upsample_initial_channel // (2**i),
-                                 h.upsample_initial_channel // (2 ** (i + 1)),
-                                 k,
-                                 u,
-                                 padding=(k - u) // 2,
-                             )
-                         )
-                     ]
-                 )
-             )
-
-         # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
-         self.resblocks = nn.ModuleList()
-         for i in range(len(self.ups)):
-             ch = h.upsample_initial_channel // (2 ** (i + 1))
-             for j, (k, d) in enumerate(
-                 zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
-             ):
-                 self.resblocks.append(
-                     resblock_class(h, ch, k, d, activation=h.activation)
-                 )
-
-         # Post-conv
-         activation_post = (
-             activations.Snake(ch, alpha_logscale=h.snake_logscale)
-             if h.activation == "snake"
-             else (
-                 activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
-                 if h.activation == "snakebeta"
-                 else None
-             )
-         )
-         if activation_post is None:
-             raise NotImplementedError(
-                 "activation incorrectly specified. check the config file and look for 'activation'."
-             )
-
-         self.activation_post = Activation1d(activation=activation_post)
-
-         # Whether to use bias for the final conv_post. Default to True for backward compatibility
-         self.use_bias_at_final = h.get("use_bias_at_final", True)
-         self.conv_post = weight_norm(
-             nn.Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)
-         )
-
-         # Weight initialization
-         for i in range(len(self.ups)):
-             self.ups[i].apply(self.init_weights)
-         self.conv_post.apply(self.init_weights)
-
-         # Final tanh activation. Defaults to True for backward compatibility
-         self.use_tanh_at_final = h.get("use_tanh_at_final", True)
-
-     def forward(self, x):
-         # Pre-conv
-         x = self.conv_pre(x)
-
-         for i in range(self.num_upsamples):
-             # Upsampling
-             for i_up in range(len(self.ups[i])):
-                 x = self.ups[i][i_up](x)
-             # AMP blocks
-             xs = None
-             for j in range(self.num_kernels):
-                 if xs is None:
-                     xs = self.resblocks[i * self.num_kernels + j](x)
-                 else:
-                     xs += self.resblocks[i * self.num_kernels + j](x)
-             x = xs / self.num_kernels
-
-         # Post-conv
-         x = self.activation_post(x)
-         x = self.conv_post(x)
-         # Final tanh activation
-         if self.use_tanh_at_final:
-             x = torch.tanh(x)
-         else:
-             x = torch.clamp(x, min=-1.0, max=1.0)  # Bound the output to [-1, 1]
-
-         return x
-
-     @classmethod
-     def from_pretrained(
-         cls,
-         model_id: str,
-         map_location: str = "cpu",
-         local_files_only: bool = False,
-         strict: bool = False,
-         *,
-         subfolder: str | None = None,
-         repo_type: str | None = None,
-         revision: str | None = None,
-         cache_dir: str | Path | None = None,
-         force_download: bool = False,
-         proxies: Dict | None = None,
-         token: bool | str | None = None,
-         resume_download: bool | None = None,
-         local_dir_use_symlinks: bool | Literal["auto"] = "auto",
-         **kwargs,
-     ):
-         """Load Pytorch pretrained weights and return the loaded model."""
-         hub_kwargs = dict(
-             repo_id=model_id,
-             subfolder=subfolder,
-             repo_type=repo_type,
-             revision=revision,
-             cache_dir=cache_dir,
-             force_download=force_download,
-             proxies=proxies,
-             resume_download=resume_download,
-             token=token,
-             local_files_only=local_files_only,
-             local_dir_use_symlinks=local_dir_use_symlinks,
-         )
-
-         # Download and load hyperparameters (h) used by BigVGAN
-         _model_path = Path(model_id)
-         if is_path_valid(model_id):
-             if is_file(model_id):
-                 _p_conf = _model_path.parent / "config.json"
-             else:
-                 _p_conf = _model_path / "config.json"
-
-             if is_file(_p_conf):
-                 print("Loading config.json from local directory")
-                 config_file = Path(model_id, "config.json")
-             else:
-                 print(f"Loading config from {model_id}")
-                 config_file = hf_hub_download(filename="config.json", **hub_kwargs)
-         else:
-             print(f"Loading config from {model_id}")
-             config_file = hf_hub_download(filename="config.json", **hub_kwargs)
-
-         h = BigVGANConfig(**load_json(config_file))
-         # instantiate BigVGAN using h
-         if h.use_cuda_kernel:
-             print(
-                 f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
-             )
-             print(
-                 f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!"
-             )
-             print(
-                 f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
-             )
-
-         model = cls(h)
-
-         # Download and load pretrained generator weight
-         _retrieve_kwargs = dict(
-             **hub_kwargs,
-             filename="bigvgan_generator.pt",
-         )
-         path = Path(model_id)
-         if path.exists():
-             if path.is_dir():
-                 path = path / "bigvgan_generator.pt"
-                 if path.exists():
-                     print("Loading weights from local directory")
-                     model_file = str(path)
-                 else:
-                     print(f"Loading weights from {model_id}")
-                     model_file = hf_hub_download(**_retrieve_kwargs)
-             else:
-                 print("Loading weights from local directory")
-                 model_file = str(path)
-         else:
-             print(f"Loading weights from {model_id}")
-             model_file = hf_hub_download(**_retrieve_kwargs)
-         checkpoint_dict = torch.load(model_file, map_location=map_location)
-
-         try:
-             model.load_state_dict(checkpoint_dict["generator"], strict=strict)
-         except RuntimeError:
-             print(
-                 f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
-             )
-             model.remove_norms()
-             model.load_state_dict(checkpoint_dict["generator"], strict=strict)
-
-         return model