lt-tensor 0.0.1a34__py3-none-any.whl → 0.0.1a36__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lt_tensor/__init__.py +1 -1
- lt_tensor/losses.py +11 -7
- lt_tensor/lr_schedulers.py +147 -21
- lt_tensor/misc_utils.py +35 -42
- lt_tensor/model_zoo/activations/__init__.py +3 -0
- lt_tensor/model_zoo/activations/alias_free/__init__.py +3 -0
- lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/act.py +8 -6
- lt_tensor/model_zoo/activations/snake/__init__.py +41 -43
- lt_tensor/model_zoo/audio_models/__init__.py +2 -2
- lt_tensor/model_zoo/audio_models/bigvgan/__init__.py +243 -0
- lt_tensor/model_zoo/audio_models/hifigan/__init__.py +22 -357
- lt_tensor/model_zoo/audio_models/istft/__init__.py +14 -349
- lt_tensor/model_zoo/audio_models/resblocks.py +248 -0
- lt_tensor/model_zoo/convs.py +21 -32
- lt_tensor/model_zoo/losses/CQT/__init__.py +0 -0
- lt_tensor/model_zoo/losses/CQT/transforms.py +336 -0
- lt_tensor/model_zoo/losses/CQT/utils.py +519 -0
- lt_tensor/model_zoo/losses/discriminators.py +375 -37
- lt_tensor/processors/audio.py +67 -57
- {lt_tensor-0.0.1a34.dist-info → lt_tensor-0.0.1a36.dist-info}/METADATA +1 -1
- lt_tensor-0.0.1a36.dist-info/RECORD +43 -0
- lt_tensor/model_zoo/activations/alias_free_torch/__init__.py +0 -1
- lt_tensor-0.0.1a34.dist-info/RECORD +0 -37
- /lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/filter.py +0 -0
- /lt_tensor/model_zoo/activations/{alias_free_torch → alias_free}/resample.py +0 -0
- {lt_tensor-0.0.1a34.dist-info → lt_tensor-0.0.1a36.dist-info}/WHEEL +0 -0
- {lt_tensor-0.0.1a34.dist-info → lt_tensor-0.0.1a36.dist-info}/licenses/LICENSE +0 -0
- {lt_tensor-0.0.1a34.dist-info → lt_tensor-0.0.1a36.dist-info}/top_level.txt +0 -0
lt_tensor/processors/audio.py
CHANGED
@@ -73,7 +73,7 @@ class AudioProcessorConfig(ModelConfig):
     def post_process(self):
         self.n_stft = self.n_fft // 2 + 1
         # some functions needs this to be a non-zero or not None value.
-        self.f_min = max(self.f_min, (self.sample_rate / (self.n_fft - 1))
+        self.f_min = max(self.f_min, (self.sample_rate / (self.n_fft - 1)) * 2)
         self.default_f_max = min(
             default(self.f_max, self.sample_rate // 2), self.sample_rate // 2
         )
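The new floor keeps `f_min` at or above twice the FFT bin spacing implied by `n_fft`. A standalone sketch of the arithmetic, with sample values assumed:

    sample_rate, n_fft = 22050, 1024  # assumed values
    f_min = 0.0
    # Same floor as the new post_process(): twice the bin spacing implied
    # by the FFT length. For these values this is ~43.11 Hz.
    f_min = max(f_min, (sample_rate / (n_fft - 1)) * 2)
    print(round(f_min, 2))  # 43.11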
@@ -105,7 +105,6 @@ class AudioProcessor(Model):
             f_min=self.cfg.f_min,
             f_max=self.cfg.f_max,
             mel_scale=self.cfg.mel_scale,
-            onesided=self.cfg.onesided,
             normalized=self.cfg.normalized,
         )
         self._mel_rscale = torchaudio.transforms.InverseMelScale(
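Dropping `onesided` tracks torchaudio, where `MelSpectrogram`'s `onesided` argument was deprecated as a no-op before being removed. A minimal sketch of the updated constructor call; every value below is an assumption, the absent keyword is the point:

    import torchaudio

    mel_spec = torchaudio.transforms.MelSpectrogram(
        sample_rate=22050,  # assumed value
        n_fft=1024,         # assumed value
        win_length=1024,
        hop_length=256,
        n_mels=80,
        f_min=0.0,
        f_max=8000.0,
        mel_scale="htk",
        normalized=False,   # no `onesided=` keyword anymore
    )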
@@ -122,17 +121,15 @@ class AudioProcessor(Model):
             (torch.hann_window(self.cfg.win_length) if window is None else window),
         )

-
-
     def compute_mel(
         self,
         wave: Tensor,
-        raw_mel_only: bool = False,
         eps: float = 1e-5,
+        raw_mel_only: bool = False,
         *,
         _recall: bool = False,
     ) -> Tensor:
-        """Returns:
+        """Returns: (M, T) or (B, M, T) if batched"""
         try:
             mel_tensor = self._mel_spec(wave.to(self.device))  # [M, T]
             if not raw_mel_only:
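Note that `eps` and `raw_mel_only` trade places, which silently changes the meaning of a positional second argument. A self-contained sketch of the hazard using stub functions, not the real API:

    def compute_mel_old(wave, raw_mel_only=False, eps=1e-5):
        return raw_mel_only, eps

    def compute_mel_new(wave, eps=1e-5, raw_mel_only=False):
        return raw_mel_only, eps

    # The same positional call flips meaning across versions; keyword
    # arguments are unaffected by the reorder.
    print(compute_mel_old(None, True))  # (True, 1e-05)
    print(compute_mel_new(None, True))  # (False, True) -- True became eps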
@@ -203,13 +200,16 @@ class AudioProcessor(Model):
         rms_ = []
         for i in range(B):
             _t = _comp_rms_helper(i, audio, mel)
-            _r = librosa.feature.rms(**_t, **rms_kwargs)[
-                0
-            ]
+            _r = librosa.feature.rms(**_t, **rms_kwargs)[0]
             rms_.append(_r)
         return self.from_numpy_batch(rms_, default_device, default_dtype).squeeze()

-    def pitch_shift(
+    def pitch_shift(
+        self,
+        audio: torch.Tensor,
+        sample_rate: Optional[int] = None,
+        n_steps: float = 2.0,
+    ):
         """
         Shifts the pitch of an audio tensor by `n_steps` semitones.
@@ -225,21 +225,25 @@ class AudioProcessor(Model):
         src_dtype = audio.dtype
         audio = audio.squeeze()
         sample_rate = default(sample_rate, self.cfg.sample_rate)
+
         def _shift_one(wav):
             wav_np = self.to_numpy_safe(wav)
-            shifted_np = librosa.effects.pitch_shift(
+            shifted_np = librosa.effects.pitch_shift(
+                wav_np, sr=sample_rate, n_steps=n_steps
+            )
             return torch.from_numpy(shifted_np)

         if audio.ndim == 1:
             return _shift_one(audio).to(device=src_device, dtype=src_dtype)
-        return torch.stack([_shift_one(a) for a in audio]).to(
-
+        return torch.stack([_shift_one(a) for a in audio]).to(
+            device=src_device, dtype=src_dtype
+        )

     @staticmethod
-    def calc_pitch_fmin(sr:int, frame_length:float):
+    def calc_pitch_fmin(sr: int, frame_length: float):
         """For pitch f_min"""
         return (sr / (frame_length - 1)) * 2
-
+
     def compute_pitch(
         self,
         audio: Tensor,
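Per channel, the reworked `pitch_shift` now passes the NumPy copy straight to librosa with explicit keywords. A standalone sketch of that inner call on a dummy signal (values assumed):

    import numpy as np
    import librosa

    sr, n_steps = 22050, 2.0
    wav_np = np.random.randn(sr).astype(np.float32)  # 1 s of noise
    # The call the new _shift_one() makes: shift up by n_steps semitones.
    shifted_np = librosa.effects.pitch_shift(wav_np, sr=sr, n_steps=n_steps)
    print(shifted_np.shape)  # (22050,)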
@@ -261,8 +265,10 @@ class AudioProcessor(Model):
         B = 1
         sr = default(sr, self.cfg.sample_rate)
         frame_length = default(frame_length, self.cfg.n_fft)
-        fmin = max(
-
+        fmin = max(
+            default(fmin, self.cfg.f_min), self.calc_pitch_fmin(sr, frame_length)
+        )
+        fmax = min(max(default(fmax, self.cfg.default_f_max), fmin + 1), sr // 2)
         hop_length = default(hop_length, self.cfg.hop_length)
         center = default(center, self.cfg.center)
         yn_kwargs = dict(
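The completed clamping floors `fmin` by the frame-derived minimum and keeps `fmax` strictly above `fmin` but at or below Nyquist. A self-contained sketch, with a stand-in for the library's `default` helper (assumed to mean "fall back when None"):

    def default(value, fallback):
        return value if value is not None else fallback

    def calc_pitch_fmin(sr: int, frame_length: float):
        return (sr / (frame_length - 1)) * 2

    sr, frame_length = 22050, 1024            # assumed values
    cfg_f_min, cfg_default_f_max = 0.0, 12000.0
    fmin = max(default(None, cfg_f_min), calc_pitch_fmin(sr, frame_length))
    fmax = min(max(default(None, cfg_default_f_max), fmin + 1), sr // 2)
    print(round(fmin, 2), fmax)  # 43.11 11025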
@@ -361,7 +367,7 @@ class AudioProcessor(Model):
         The modes available for upsampling are: `nearest`, `linear` (3D-only),
         `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only)
         """
-
+        tensor = tensor.squeeze()
         if tensor.ndim == 2:  # [1, T]
             tensor = tensor.unsqueeze(1)  # [1, 1, T]
         elif tensor.ndim == 1:
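The added `squeeze()` normalizes stray singleton dimensions before the ndim checks so the tensor reaches `F.interpolate` as `[B, 1, T]`. A sketch of the shape path (target size and mode are assumed):

    import torch
    import torch.nn.functional as F

    tensor = torch.randn(1, 22050)
    tensor = tensor.squeeze()           # [T]
    if tensor.ndim == 1:
        tensor = tensor[None, None, :]  # [1, 1, T]
    elif tensor.ndim == 2:
        tensor = tensor.unsqueeze(1)    # [B, 1, T]
    out = F.interpolate(tensor, size=16000, mode="linear")
    print(out.shape)  # torch.Size([1, 1, 16000])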
@@ -384,7 +390,7 @@ class AudioProcessor(Model):
         hop_length: Optional[int] = None,
         win_length: Optional[int] = None,
         length: Optional[int] = None,
-        center:
+        center: bool = True,
         normalized: Optional[bool] = None,
         onesided: Optional[bool] = None,
         return_complex: bool = False,
@@ -403,7 +409,7 @@ class AudioProcessor(Model):
             hop_length=default(hop_length, self.cfg.hop_length),
             win_length=default(win_length, self.cfg.win_length),
             window=window,
-            center=
+            center=center,
             normalized=default(normalized, self.cfg.normalized),
             onesided=default(onesided, self.cfg.onesided),
             length=length,
@@ -421,44 +427,48 @@ class AudioProcessor(Model):
         self,
         wave: Tensor,
         length: Optional[int] = None,
-
-
+        center: bool = True,
+        n_fft: Optional[int] = None,
+        hop_length: Optional[int] = None,
+        win_length: Optional[int] = None,
+        normalized: Optional[bool] = None,
+        onesided: Optional[bool] = None,
+        return_complex: bool = False,
     ):
-        ... (34 removed lines not rendered in the source diff) ...
-        raise e
+
+        if win_length is not None and win_length != self.cfg.win_length:
+            window = torch.hann_window(win_length, device=wave.device)
+        else:
+            window = self.window
+        spectrogram = torch.stft(
+            input=wave,
+            n_fft=default(n_fft, self.cfg.n_fft),
+            hop_length=default(hop_length, self.cfg.hop_length),
+            win_length=default(win_length, self.cfg.win_length),
+            window=window,
+            center=center,
+            pad_mode="reflect",
+            normalized=default(normalized, self.cfg.normalized),
+            onesided=default(onesided, self.cfg.onesided),
+            return_complex=True,
+        )
+        return torch.istft(
+            spectrogram
+            * torch.full(
+                spectrogram.size(),
+                fill_value=1,
+                device=spectrogram.device,
+            ),
+            n_fft=default(n_fft, self.cfg.n_fft),
+            hop_length=default(hop_length, self.cfg.hop_length),
+            win_length=default(win_length, self.cfg.win_length),
+            window=self.window,
+            length=length,
+            center=center,
+            normalized=default(normalized, self.cfg.normalized),
+            onesided=default(onesided, self.cfg.onesided),
+            return_complex=return_complex,
+        )

     def load_audio(
         self,
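The rewritten body is an STFT followed immediately by an iSTFT over the same parameters; the multiply by `torch.full(..., fill_value=1)` is an identity, so the result is essentially a round trip. A minimal standalone sketch of that round trip (sizes assumed):

    import torch

    n_fft, hop, win = 1024, 256, 1024
    wave = torch.randn(22050)
    window = torch.hann_window(win)
    spec = torch.stft(wave, n_fft, hop_length=hop, win_length=win,
                      window=window, center=True, pad_mode="reflect",
                      return_complex=True)
    # Identity multiply, as in the rewritten istft_norm above.
    spec = spec * torch.full(spec.size(), fill_value=1, device=spec.device)
    out = torch.istft(spec, n_fft, hop_length=hop, win_length=win,
                      window=window, center=True, length=wave.numel())
    print(torch.allclose(wave, out, atol=1e-5))  # True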
lt_tensor-0.0.1a36.dist-info/RECORD
ADDED
@@ -0,0 +1,43 @@
+lt_tensor/__init__.py,sha256=nBbiGH1byHU0aTTKKorRj8MIEO2oEMBXl7kt5DOCatU,441
+lt_tensor/config_templates.py,sha256=F9UvL8paAjkSvio890kp8WznpYeI50pYnm9iqQroBxk,2797
+lt_tensor/losses.py,sha256=Heco_WyoC1HkNkcJEircOAzS9umusATHiNAG-FKGyzc,8918
+lt_tensor/lr_schedulers.py,sha256=6_vcfaPHrozfH3wvmNEdKSFYl6iTIijYoHL8vuG-45U,7651
+lt_tensor/math_ops.py,sha256=ahX6Z1Mt3X-FhmwSZYZea5mB1B0S8GDuvKPfAm5e_FQ,2646
+lt_tensor/misc_utils.py,sha256=stL6q3M7S2N4FBICFYbgYpdPDrJRlwmr24-iCXMRifM,28933
+lt_tensor/model_base.py,sha256=5T4dbAh4MXbQmPRpihGtMYwTY8sJTQOhY6An3VboM58,18086
+lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
+lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
+lt_tensor/torch_commons.py,sha256=8l0bxmrAzwvyqjivCIVISXlbvKarlg4DdE0BOGSnMuQ,812
+lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
+lt_tensor/model_zoo/__init__.py,sha256=yPUVchgVhU2nAJ2ocA4HFfG7IMEiBu8qOi8I1KWTTkU,404
+lt_tensor/model_zoo/basic.py,sha256=pI8HyiHK-cmWcEEaVY_EduUJOjZW6HOtXvJd8Rbhq30,15452
+lt_tensor/model_zoo/convs.py,sha256=Tws0jrPfs9m7OLmJ30W0AfkAvZgppW7lNi4xt0e-qRU,3518
+lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
+lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
+lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
+lt_tensor/model_zoo/residual.py,sha256=tMXgif9Ggep9bk75K93yueeU5vk5S25AGCRFwOQOyB8,6452
+lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
+lt_tensor/model_zoo/activations/__init__.py,sha256=f_IsuC-SaFsX6w4OtBWa5bbS4TqR90X-cvLxGUgYfjk,67
+lt_tensor/model_zoo/activations/alias_free/__init__.py,sha256=dgLjatRm9nusoPVOl1pvCef5rZsaRfS3BJUs05SPYzw,64
+lt_tensor/model_zoo/activations/alias_free/act.py,sha256=1wxmab2kMD88L6wsQgf3t25dBwR7_he2eM1DlV0FQak,1424
+lt_tensor/model_zoo/activations/alias_free/filter.py,sha256=5TvXESv31toD5sePBe_OUJJfMXv6Ohwmx2YawjQL-pk,6004
+lt_tensor/model_zoo/activations/alias_free/resample.py,sha256=3iM4fNr9fLNXXMyXvzW-MwkSjOZOrMZLfS80UHs6zk0,3386
+lt_tensor/model_zoo/activations/snake/__init__.py,sha256=AtOAbJuMinxmKkppITGMzRbcbPQaALnl9mCtl1c3x0Q,4356
+lt_tensor/model_zoo/audio_models/__init__.py,sha256=WwiP9MekJreMOfKPWLl24VkRJIpLk6hhL8ch0aKgOss,103
+lt_tensor/model_zoo/audio_models/resblocks.py,sha256=u-foHxaFDUICjxSkpyHXljQYQG9zMxVYaOGqLR_nJ-k,7978
+lt_tensor/model_zoo/audio_models/bigvgan/__init__.py,sha256=4EZG8Non75dHoDCizMHbMTvPrKwdUlPYGHc7hkfT_nw,8526
+lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=PDuDYN1omD1RoAXcmxH3tEgfAuM3ZHAWzimD6ElMqEQ,9073
+lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=ITSXHg3c0Um1P2HaPaXkQKI7meG5Ne60wTbyyYju3hY,6360
+lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=blICjLX_z_IFmR3_TCz_dJiSayLYGza9eG6fd9aKyvE,7448
+lt_tensor/model_zoo/losses/__init__.py,sha256=B9RAUxBiOZwooztnij1oLeRwZ7_MjnN3mPoum7saD6s,59
+lt_tensor/model_zoo/losses/discriminators.py,sha256=o4cicNdOv0jH3ink7jTNeDqOnwmkmRtEj9E7IUIGnEI,31866
+lt_tensor/model_zoo/losses/CQT/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lt_tensor/model_zoo/losses/CQT/transforms.py,sha256=Vkid0J9dqLnlINfyyUlQf-qB3gOQAgU7W9j7xLOjDFw,13218
+lt_tensor/model_zoo/losses/CQT/utils.py,sha256=twGw6FVD7V5Ksfx_1BUEN3EP1tAS6wo-9LL3VnuHB8c,16751
+lt_tensor/processors/__init__.py,sha256=Pvxhh0KR65zLCgUd53_k5Z0y5JWWcO0ZBXFK9rv0o5w,109
+lt_tensor/processors/audio.py,sha256=3YzyEpMwh124rb1KMAly62qweeruF200BnM-vQIbzy0,18645
+lt_tensor-0.0.1a36.dist-info/licenses/LICENSE,sha256=TbiyJWLgNqqgqhfCnrGwFIxy7EqGNrIZZcKhHrefcuU,11354
+lt_tensor-0.0.1a36.dist-info/METADATA,sha256=mTmnoWn8EG48j_VOM3rr_8RLLgaxB5pWZE1tkPdFrac,1062
+lt_tensor-0.0.1a36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lt_tensor-0.0.1a36.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+lt_tensor-0.0.1a36.dist-info/RECORD,,
lt_tensor/model_zoo/activations/alias_free_torch/__init__.py
DELETED
@@ -1 +0,0 @@
-from . import *
lt_tensor-0.0.1a34.dist-info/RECORD
DELETED
@@ -1,37 +0,0 @@
-lt_tensor/__init__.py,sha256=WAGPuMPq5c4DGAJ57x1Ykgzg3vMlLq9BiWk5EdJcUsU,441
-lt_tensor/config_templates.py,sha256=F9UvL8paAjkSvio890kp8WznpYeI50pYnm9iqQroBxk,2797
-lt_tensor/losses.py,sha256=fHVMqOFo3ekjORYy89R_aRjmtT6lo27Z1egzOYjQ1W8,8646
-lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
-lt_tensor/math_ops.py,sha256=ahX6Z1Mt3X-FhmwSZYZea5mB1B0S8GDuvKPfAm5e_FQ,2646
-lt_tensor/misc_utils.py,sha256=N2r3UmxC4RM2BZBQhpjDZ_BKLrzsyIlKzopTzJbnjFU,28962
-lt_tensor/model_base.py,sha256=5T4dbAh4MXbQmPRpihGtMYwTY8sJTQOhY6An3VboM58,18086
-lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
-lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
-lt_tensor/torch_commons.py,sha256=8l0bxmrAzwvyqjivCIVISXlbvKarlg4DdE0BOGSnMuQ,812
-lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
-lt_tensor/model_zoo/__init__.py,sha256=yPUVchgVhU2nAJ2ocA4HFfG7IMEiBu8qOi8I1KWTTkU,404
-lt_tensor/model_zoo/basic.py,sha256=pI8HyiHK-cmWcEEaVY_EduUJOjZW6HOtXvJd8Rbhq30,15452
-lt_tensor/model_zoo/convs.py,sha256=YQRxek75Qpsha8nfc7wLhmJS9XxPeCa4WxuftLg6IcE,3927
-lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
-lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
-lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
-lt_tensor/model_zoo/residual.py,sha256=tMXgif9Ggep9bk75K93yueeU5vk5S25AGCRFwOQOyB8,6452
-lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
-lt_tensor/model_zoo/activations/alias_free_torch/__init__.py,sha256=ovguP4wzQEDNguczwiZnhMm4dRRVcvnzmHrfQtlRCNQ,15
-lt_tensor/model_zoo/activations/alias_free_torch/act.py,sha256=h79C93GzbSrCq4ui6iO7DjJLuJ7QK_ag_TU-WAcj0NI,1405
-lt_tensor/model_zoo/activations/alias_free_torch/filter.py,sha256=5TvXESv31toD5sePBe_OUJJfMXv6Ohwmx2YawjQL-pk,6004
-lt_tensor/model_zoo/activations/alias_free_torch/resample.py,sha256=3iM4fNr9fLNXXMyXvzW-MwkSjOZOrMZLfS80UHs6zk0,3386
-lt_tensor/model_zoo/activations/snake/__init__.py,sha256=Adb_xe-7YdYsNxvlSSO9zkae-cu7ElxkBKE3trDtOus,4517
-lt_tensor/model_zoo/audio_models/__init__.py,sha256=MoG9YjxLyvscq_6njK1ljGBletK9iedBXt66bplzW-s,83
-lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=PDuDYN1omD1RoAXcmxH3tEgfAuM3ZHAWzimD6ElMqEQ,9073
-lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=7GJqKLw7-juXpfp5IFzjASLut0uouDhjZ1CQknf3H68,16533
-lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=ltIuD9t1gmS3bTmCqZIwJHKrhC6DYya3OaXlskWX9kw,17606
-lt_tensor/model_zoo/losses/__init__.py,sha256=B9RAUxBiOZwooztnij1oLeRwZ7_MjnN3mPoum7saD6s,59
-lt_tensor/model_zoo/losses/discriminators.py,sha256=ZpyByFgc7L7uV_XRBsV9vkdVItbJO3z--Y6LlvTvtwY,20765
-lt_tensor/processors/__init__.py,sha256=Pvxhh0KR65zLCgUd53_k5Z0y5JWWcO0ZBXFK9rv0o5w,109
-lt_tensor/processors/audio.py,sha256=HNr1GS-6M2q0Rda4cErf5y2Jlc9f4jD58FvpX2ua9d4,18369
-lt_tensor-0.0.1a34.dist-info/licenses/LICENSE,sha256=TbiyJWLgNqqgqhfCnrGwFIxy7EqGNrIZZcKhHrefcuU,11354
-lt_tensor-0.0.1a34.dist-info/METADATA,sha256=WkTafcY5nYZbrZ7WzUc3JXnmg9NtUAXrchx42dCok9I,1062
-lt_tensor-0.0.1a34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lt_tensor-0.0.1a34.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
-lt_tensor-0.0.1a34.dist-info/RECORD,,