lt-tensor 0.0.1a4__py3-none-any.whl → 0.0.1a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lt_tensor/processors/audio.py ADDED
@@ -0,0 +1,193 @@
+ __all__ = ["AudioProcessor"]
+ from ..torch_commons import *
+ from lt_utils.common import *
+ from lt_utils.type_utils import is_file, is_array
+ from ..misc_utils import log_tensor
+ import librosa
+ import torchaudio
+
+ from ..transform import InverseTransformConfig, InverseTransform
+ from lt_utils.file_ops import FileScan, get_file_name, path_to_str
+
+ from lt_tensor.model_base import Model
+
+
+ class AudioProcessor(Model):
+     def __init__(
+         self,
+         sample_rate: int = 24000,
+         n_mels: int = 80,
+         n_fft: int = 1024,
+         win_length: Optional[int] = None,
+         hop_length: Optional[int] = None,
+         f_min: float = 0,
+         f_max: float | None = None,
+         n_iter: int = 32,
+         center: bool = True,
+         mel_scale: Literal["htk", "slaney"] = "htk",
+         std: int = 4,
+         mean: int = -4,
+         inverse_transform_config: Union[
+             Dict[str, Union[Number, Tensor, bool]], InverseTransformConfig
+         ] = dict(n_fft=16, hop_length=4, win_length=16, center=True),
+         *__,
+         **_,
+     ):
+         super().__init__()
+         assert isinstance(inverse_transform_config, (InverseTransformConfig, dict))
+         self.mean = mean
+         self.std = std
+         self.n_mels = n_mels
+         self.n_fft = n_fft
+         self.n_stft = n_fft // 2 + 1
+         self.f_min = f_min
+         self.f_max = f_max
+         self.n_iter = n_iter
+         self.hop_length = hop_length or n_fft // 4
+         self.win_length = win_length or n_fft
+         self.sample_rate = sample_rate
+         self.mel_spec = torchaudio.transforms.MelSpectrogram(
+             sample_rate=sample_rate,
+             n_mels=n_mels,
+             n_fft=n_fft,
+             win_length=win_length,
+             hop_length=hop_length,
+             center=center,
+             f_min=f_min,
+             f_max=f_max,
+             mel_scale=mel_scale,
+         )
+         self.mel_rscale = torchaudio.transforms.InverseMelScale(
+             n_stft=self.n_stft,
+             n_mels=n_mels,
+             sample_rate=sample_rate,
+             f_min=f_min,
+             f_max=f_max,
+             mel_scale=mel_scale,
+         )
+         self.giffin_lim = torchaudio.transforms.GriffinLim(
+             n_fft=n_fft,
+             n_iter=n_iter,
+             win_length=win_length,
+             hop_length=hop_length,
+         )
+         if isinstance(inverse_transform_config, dict):
+             inverse_transform_config = InverseTransformConfig(
+                 **inverse_transform_config
+             )
+         self._inv_transform = InverseTransform(**inverse_transform_config.to_dict())
+
+     def inverse_transform(self, spec: Tensor, phase: Tensor, *_, **kwargs):
+         return self._inv_transform(spec, phase, **kwargs)
+
+     def compute_mel(
+         self, wave: Tensor, base: float = 1e-6, add_base: bool = False
+     ) -> Tensor:
+         """Returns: [B, M, ML]"""
+         wave_device = wave.device
+         mel_tensor = self.mel_spec(wave.to(self.device))  # [M, ML]
+         if not add_base:
+             return (mel_tensor - self.mean) / self.std
+         return ((torch.log(base + mel_tensor.unsqueeze(0)) - self.mean) / self.std).to(
+             device=wave_device
+         )
+
+     def reverse_mel(self, mel: Tensor, n_iter: Optional[int] = None):
+         if isinstance(n_iter, int) and n_iter != self.n_iter:
+             self.giffin_lim = torchaudio.transforms.GriffinLim(
+                 n_fft=self.n_fft,
+                 n_iter=n_iter,
+                 win_length=self.win_length,
+                 hop_length=self.hop_length,
+             )
+             self.n_iter = n_iter
+         return self.giffin_lim.forward(
+             self.mel_rscale(mel),
+         )
+
+     def load_audio(
+         self,
+         path: PathLike,
+         top_db: float = 30,
+     ) -> Tensor:
+         is_file(path, True)
+         wave, sr = librosa.load(str(path), sr=self.sample_rate)
+         wave, _ = librosa.effects.trim(wave, top_db=top_db)
+         return (
+             torch.from_numpy(
+                 librosa.resample(wave, orig_sr=sr, target_sr=self.sample_rate)
+                 if sr != self.sample_rate
+                 else wave
+             )
+             .float()
+             .unsqueeze(0)
+         )
+
+     def find_audios(self, path: PathLike, additional_extensions: List[str] = []):
+         extensions = [
+             "*.wav",
+             "*.aac",
+             "*.m4a",
+             "*.mp3",
+             "*.ogg",
+             "*.opus",
+             "*.flac",
+         ]
+         extensions.extend(
+             [x for x in additional_extensions if isinstance(x, str) and "*" in x]
+         )
+         return FileScan.files(
+             path,
+             extensions,
+         )
+
+     def find_audio_text_pairs(
+         self,
+         path,
+         additional_extensions: List[str] = [],
+         text_file_patterns: List[str] = [".normalized.txt", ".original.txt"],
+     ):
+         is_array(text_file_patterns, True, validate=True)  # Raises if empty or not valid
+         additional_extensions = [
+             x
+             for x in additional_extensions
+             if isinstance(x, str)
+             and "*" in x
+             and not any(y in x for y in text_file_patterns)
+         ]
+         audio_files = self.find_audios(path, additional_extensions)
+         results = []
+         for audio in audio_files:
+             base_audio_dir = Path(audio).parent
+             audio_name = get_file_name(audio, False)
+             for pattern in text_file_patterns:
+                 possible_txt_file = Path(base_audio_dir, audio_name + pattern)
+                 if is_file(possible_txt_file):
+                     results.append((audio, path_to_str(possible_txt_file)))
+                     break
+         return results
+
+     def stft_loss(self, signal: Tensor, ground: Tensor, base: float = 1e-5):
+         sig_mel = self(signal, base)
+         gnd_mel = self(ground, base)
+         return torch.norm(gnd_mel - sig_mel, p=1) / torch.norm(gnd_mel, p=1)
+
+     # def forward(self, wave: Tensor, base: Optional[float] = None):
+     def forward(
+         self,
+         *inputs: Union[Tensor, float],
+         ap_task: Literal[
+             "get_mel", "get_loss", "inv_transform", "revert_mel"
+         ] = "get_mel",
+         **inputs_kwargs,
+     ):
+         if ap_task == "get_mel":
+             return self.compute_mel(*inputs, **inputs_kwargs)
+         elif ap_task == "get_loss":
+             return self.stft_loss(*inputs, **inputs_kwargs)
+         elif ap_task == "inv_transform":
+             return self.inverse_transform(*inputs, **inputs_kwargs)
+         elif ap_task == "revert_mel":
+             return self.reverse_mel(*inputs, **inputs_kwargs)
+         else:
+             raise ValueError(f"Invalid task '{ap_task}'")
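
For orientation, a minimal usage sketch of the new AudioProcessor (not part of the diff; the import path follows the RECORD entry lt_tensor/processors/audio.py, and "sample.wav" is a hypothetical file):

import torch
from lt_tensor.processors.audio import AudioProcessor

ap = AudioProcessor(sample_rate=24000, n_mels=80, n_fft=1024)
wave = ap.load_audio("sample.wav")                 # trimmed, resampled -> [1, T]
mel = ap.compute_mel(wave)                         # scaled as (mel - mean) / std
restored = ap.reverse_mel(mel * ap.std + ap.mean)  # undo scaling, then Griffin-Lim
loss = ap.stft_loss(restored[..., : wave.shape[-1]], wave)

The same calls are reachable through forward(), e.g. ap(wave, ap_task="get_mel"), which dispatches on the ap_task keyword.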
lt_tensor/transform.py CHANGED
@@ -8,8 +8,6 @@ __all__ = [
      "normalize",
      "min_max_scale",
      "mel_to_linear",
-     "add_noise",
-     "shift_time",
      "stretch_tensor",
      "pad_tensor",
      "get_sinusoidal_embedding",
@@ -17,6 +15,8 @@ __all__ = [
      "normalize",
      "window_sumsquare",
      "inverse_transform",
+     "InverseTransformConfig",
+     "InverseTransform",
      "stft_istft_rebuild",
  ]

@@ -25,12 +25,15 @@ import torchaudio
  import math
  from .misc_utils import log_tensor
  from lt_utils.common import *
+ from lt_utils.misc_utils import cache_wrapper, default
  import torch.nn.functional as F
+ from .model_base import Model
+ import warnings


  def to_mel_spectrogram(
      waveform: torch.Tensor,
-     sample_rate: int = 22050,
+     sample_rate: int = 24000,
      n_fft: int = 1024,
      hop_length: Optional[int] = None,
      win_length: Optional[int] = None,
@@ -39,16 +42,19 @@ def to_mel_spectrogram(
      f_max: Optional[float] = None,
  ) -> torch.Tensor:
      """Converts waveform to mel spectrogram."""
-     mel_spectrogram = torchaudio.transforms.MelSpectrogram(
-         sample_rate=sample_rate,
-         n_fft=n_fft,
-         hop_length=hop_length,
-         win_length=win_length,
-         n_mels=n_mels,
-         f_min=f_min,
-         f_max=f_max,
+     return (
+         torchaudio.transforms.MelSpectrogram(
+             sample_rate=sample_rate,
+             n_fft=n_fft,
+             hop_length=hop_length,
+             win_length=win_length,
+             n_mels=n_mels,
+             f_min=f_min,
+             f_max=f_max,
+         )
+         .to(device=waveform.device)
+         .forward(waveform)
      )
-     return mel_spectrogram(waveform)


  def stft(
@@ -151,16 +157,6 @@ def mel_to_linear(
      return torch.matmul(mel_fb_inv, mel_spec + eps)


- def add_noise(x: torch.Tensor, noise_level: float = 0.01) -> torch.Tensor:
-     """Adds Gaussian noise to tensor."""
-     return x + noise_level * torch.randn_like(x)
-
-
- def shift_time(x: torch.Tensor, shift: int) -> torch.Tensor:
-     """Shifts tensor along time axis (last dim)."""
-     return torch.roll(x, shifts=shift, dims=-1)
-
-
  def stretch_tensor(x: torch.Tensor, rate: float, mode: str = "linear") -> torch.Tensor:
      """Time-stretch tensor using interpolation."""
      B, C, T = x.shape if x.ndim == 3 else (1, 1, x.shape[0])
@@ -207,8 +203,7 @@ def generate_window(
          return torch.ones(1, device=device)

      n = torch.arange(M, dtype=torch.float32, device=device)
-     window = alpha - (1.0 - alpha) * torch.cos(2.0 * math.pi * n / (M - 1))
-     return window
+     return alpha - (1.0 - alpha) * torch.cos(2.0 * math.pi * n / (M - 1))


  def pad_center(tensor: torch.Tensor, size: int, axis: int = -1) -> torch.Tensor:
@@ -269,12 +264,12 @@ def normalize(
  def window_sumsquare(
      window_spec: Union[str, int, float, Callable, List[Any], Tuple[Any, ...]],
      n_frames: int,
-     hop_length: int = 300,
-     win_length: int = 1200,
+     hop_length: int = 256,
+     win_length: int = 1024,
      n_fft: int = 2048,
      dtype: torch.dtype = torch.float32,
      norm: Optional[Union[int, float]] = None,
-     device: Optional[torch.device] = "cpu",
+     device: Optional[torch.device] = None,
  ):
      if win_length is None:
          win_length = n_fft
@@ -303,9 +298,9 @@ def window_sumsquare(
  def inverse_transform(
      spec: Tensor,
      phase: Tensor,
-     n_fft: int = 2048,
-     hop_length: int = 300,
-     win_length: int = 1200,
+     n_fft: int = 1024,
+     hop_length: Optional[int] = None,
+     win_length: Optional[int] = None,
      length: Optional[Any] = None,
      window: Optional[Tensor] = None,
  ):
@@ -319,3 +314,168 @@ def inverse_transform(
          window=window,
          length=length,
      )
+
+
+ def is_nand(a: bool, b: bool):
+     """NAND truth table: `result = not (a and b)`
+     ```
+     False, False -> True
+     False, True  -> True
+     True,  False -> True
+     True,  True  -> False
+     ```
+     """
+     return not (a and b)
+
+
+ class InverseTransformConfig:
+     def __init__(
+         self,
+         n_fft: int = 1024,
+         hop_length: Optional[int] = None,
+         win_length: Optional[int] = None,
+         length: Optional[int] = None,
+         window: Optional[Tensor] = None,
+         onesided: Optional[bool] = None,
+         return_complex: bool = False,
+         normalized: bool = False,
+         center: bool = True,
+     ):
+         self.n_fft = n_fft
+         self.hop_length = hop_length
+         self.win_length = win_length
+         self.length = length
+         self.onesided = onesided
+         self.return_complex = return_complex
+         self.normalized = normalized
+         self.center = center
+         self.window = window
+
+     def to_dict(self):
+         return self.__dict__.copy()
+
+
+ class InverseTransform(Model):
+     def __init__(
+         self,
+         n_fft: int = 1024,
+         hop_length: Optional[int] = None,
+         win_length: Optional[int] = None,
+         length: Optional[int] = None,
+         window: Optional[Tensor] = None,
+         onesided: Optional[bool] = None,
+         return_complex: bool = False,
+         normalized: bool = False,
+         center: bool = True,
+     ):
+         """
+         Module for inverting a magnitude + phase spectrogram to a waveform using ISTFT.
+
+         This class encapsulates the common ISTFT parameters at initialization and applies
+         the inverse transformation in the `forward()` method with minimal per-call overhead.
+
+         Parameters
+         ----------
+         n_fft : int, optional
+             Size of FFT to use during inversion. Default is 1024.
+         hop_length : int, optional
+             Number of audio samples between STFT columns. Defaults to `n_fft // 4`.
+         win_length : int, optional
+             Size of the window function. Defaults to `n_fft`.
+         length : int, optional
+             Output waveform length. If not provided, the length will be inferred.
+         window : Tensor, optional
+             Custom window tensor. If None, a Hann window is used.
+         onesided : bool, optional
+             Whether the input STFT was onesided. Used only for consistency checks.
+         return_complex : bool, default=False
+             Passed through to `torch.istft`; must be False when `onesided` is True.
+         normalized : bool, default=False
+             Whether the STFT was normalized.
+         center : bool, default=True
+             Whether the signal was padded during the STFT.
+
+         Methods
+         -------
+         forward(spec, phase)
+             Applies ISTFT using the stored settings on the given magnitude and phase tensors.
+         update_settings(...)
+             Updates ISTFT parameters dynamically (also used internally by `forward`).
+         """
+         super().__init__()
+         assert window is None or isinstance(window, Tensor)
+         # `onesided` and `return_complex` must never both be True.
+         assert is_nand(bool(onesided), return_complex)
+         self.n_fft = n_fft
+         self.length = length
+         self.win_length = win_length or n_fft
+         self.hop_length = hop_length or n_fft // 4
+         self.center = center
+         self.return_complex = return_complex
+         self.onesided = onesided
+         self.normalized = normalized
+         self.window = torch.hann_window(self.win_length) if window is None else window
+         # Populate self.kwargs so forward() also works before any per-call override.
+         self.update_settings()
+
+     def _apply_device_to(self):
+         """Moves `window` to the active device when used through the `Model` base."""
+         self.window = self.window.to(device=self.device)
+
+     def update_settings(
+         self,
+         *,
+         n_fft: Optional[int] = None,
+         hop_length: Optional[int] = None,
+         win_length: Optional[int] = None,
+         length: Optional[int] = None,
+         window: Optional[Tensor] = None,
+         onesided: Optional[bool] = None,
+         return_complex: Optional[bool] = None,
+         center: Optional[bool] = None,
+         normalized: Optional[bool] = None,
+         **_,
+     ):
+         self.kwargs = dict(
+             n_fft=default(n_fft, self.n_fft),
+             hop_length=default(hop_length, self.hop_length),
+             win_length=default(win_length, self.win_length),
+             length=default(length, self.length),
+             window=default(window, self.window),
+             onesided=default(onesided, self.onesided),
+             return_complex=default(return_complex, self.return_complex),
+             center=default(center, self.center),
+             normalized=default(normalized, self.normalized),
+         )
+         if self.kwargs["onesided"] and self.kwargs["return_complex"]:
+             warnings.warn(
+                 "`return_complex` cannot be used with `onesided` enabled; setting `return_complex` to False."
+             )
+             self.kwargs["return_complex"] = False
+
+     def forward(self, spec: Tensor, phase: Tensor, **kwargs):
+         """
+         Perform the inverse short-time Fourier transform.
+
+         Parameters
+         ----------
+         spec : Tensor
+             Magnitude spectrogram of shape (batch, freq, time).
+         phase : Tensor
+             Phase angles in radians, same shape as `spec`.
+         **kwargs : dict, optional
+             Optional ISTFT override parameters (same as in `update_settings`).
+
+         Returns
+         -------
+         Tensor
+             Time-domain waveform reconstructed from `spec` and `phase`.
+         """
+         if kwargs:
+             self.update_settings(**kwargs)
+         # Rebuild the complex spectrogram as spec * e^(j*phase), then invert it.
+         return torch.istft(spec * torch.exp(phase * 1j), **self.kwargs)
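
For reference, a minimal round-trip sketch of the new InverseTransform (not part of the diff; the STFT settings are illustrative, and it assumes the Model base is a torch.nn.Module so the instance is callable):

import torch
from lt_tensor.transform import InverseTransform

n_fft, hop = 1024, 256
wave = torch.randn(1, 24000)
window = torch.hann_window(n_fft)
stft = torch.stft(wave, n_fft=n_fft, hop_length=hop, window=window, return_complex=True)
inv = InverseTransform(n_fft=n_fft, hop_length=hop, window=window)
recon = inv(stft.abs(), stft.angle(), length=wave.shape[-1])  # magnitude + phase -> [1, 24000]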
lt_tensor-0.0.1a7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lt-tensor
- Version: 0.0.1a4
+ Version: 0.0.1a7
  Summary: General utilities for PyTorch and others. Built for general use.
  Home-page: https://github.com/gr1336/lt-tensor/
  Author: gr1336
@@ -17,7 +17,7 @@ Requires-Dist: numpy>=1.26.4
  Requires-Dist: tokenizers
  Requires-Dist: pyyaml>=6.0.0
  Requires-Dist: numba>0.60.0
- Requires-Dist: lt-utils==0.0.1.a3
+ Requires-Dist: lt-utils==0.0.1
  Requires-Dist: librosa>=0.11.0
  Dynamic: author
  Dynamic: classifier
lt_tensor-0.0.1a7.dist-info/RECORD ADDED
@@ -0,0 +1,28 @@
+ lt_tensor/__init__.py,sha256=uwJ7uiO18VYj8Z1V4KSOQ3ZrnowSgJWKCIiFBrzLMOI,429
+ lt_tensor/losses.py,sha256=TinZJP2ypZ7Tdg6d9nnFWFkPyormfgQ0Z9P2ER3sqzE,4341
+ lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
+ lt_tensor/math_ops.py,sha256=ewIYkvxIy_Lab_9ExjFUgLs-oYLOu8IRRDo7f1pn3i8,2248
+ lt_tensor/misc_utils.py,sha256=sjWUkUaHFhaCdN4rZ6X-cQDbPieimfKchKq9VtjiwEA,17029
+ lt_tensor/model_base.py,sha256=8qN7oklALFanOz-eqVzdnB9RD2kN_3ltynSMAPOl-TI,13413
+ lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
+ lt_tensor/noise_tools.py,sha256=JkWw0-bCMRNNMShwXKKt5KbO3104tvNiBePt-ThPkEo,11366
+ lt_tensor/torch_commons.py,sha256=fntsEU8lhBQo0ebonI1iXBkMbWMN3HpBsG13EWlP5s8,718
+ lt_tensor/transform.py,sha256=va4bQjpfhH-tnaBDvJZpmYmfg9zwn5_Y6pPOoTswS-U,14471
+ lt_tensor/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lt_tensor/datasets/audio.py,sha256=kVluXRbLX7C5W5aFN7CUMb-O1KTHjTiTvE4f7iBcnZk,3754
+ lt_tensor/model_zoo/__init__.py,sha256=jipEk50_DTMQbGg8FnDDukxmh7Bcwvl_QVRS3rkb7aY,283
+ lt_tensor/model_zoo/bsc.py,sha256=muxIR7dU-Pvf-HFE-iy3zmRb1sTJlcs1vqdlnbU1Hss,6307
+ lt_tensor/model_zoo/disc.py,sha256=jZPhoSV1hlrba3ohXGutYAAcSl4pWkqGYFpOlOoN3eo,4740
+ lt_tensor/model_zoo/fsn.py,sha256=5ySsg2OHjvTV_coPAdZQ0f7bz4ugJB8mDYsItmd61qA,2102
+ lt_tensor/model_zoo/gns.py,sha256=Tirr_grONp_FFQ_L7K-zV2lvkaC39h8mMl4QDpx9vLQ,6028
+ lt_tensor/model_zoo/istft.py,sha256=0Xms2QNPAgz_ib8XTfaWl1SCHgS53oKC6-EkDkl_qe4,4863
+ lt_tensor/model_zoo/pos.py,sha256=N28v-rF8CELouYxQ9r45Jbd4ri5DNydwDgg7nzmQ4Ig,4471
+ lt_tensor/model_zoo/rsd.py,sha256=5bba50g1Hm5kMexuJ4SwOIJuyQ1qJd8Acrq-Ax6CqE8,6958
+ lt_tensor/model_zoo/tfrms.py,sha256=kauh-A13pk08SZ5OspEE5a-gPKD4rZr6tqMKWu3KGhk,4237
+ lt_tensor/processors/__init__.py,sha256=4b9MxAJolXiJfSm20ZEspQTDm1tgLazwlPWA_jB1yLM,63
+ lt_tensor/processors/audio.py,sha256=mU0usiagVyNPd0uEadL_lC4BFzSMNpjTIwth82gFJRI,6650
+ lt_tensor-0.0.1a7.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ lt_tensor-0.0.1a7.dist-info/METADATA,sha256=5A9UrpFdhQikU44UlJbLQmQAVTceVhcbPV25jmLM9Os,965
+ lt_tensor-0.0.1a7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lt_tensor-0.0.1a7.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+ lt_tensor-0.0.1a7.dist-info/RECORD,,
lt_tensor-0.0.1a4.dist-info/RECORD DELETED
@@ -1,24 +0,0 @@
- lt_tensor/__init__.py,sha256=bvCjaIsYjbGFbR5MNezgLyRgN4_CsyrjmVEvuClsgOU,303
- lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
- lt_tensor/math_ops.py,sha256=ZtnJ9WB-pbFQLsXuNfQl2dAaeob5BWfxmhkwpxITUZ4,2066
- lt_tensor/misc_utils.py,sha256=e44FCQbjNHP-4WOHIbtqqH0x590DzUE6CrD_4Vl_d38,19880
- lt_tensor/model_base.py,sha256=tmRu5pTcELKMFcybOiZ1thJPuJWRSPkbUUtp9Y1NJWw,9555
- lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
- lt_tensor/torch_commons.py,sha256=fntsEU8lhBQo0ebonI1iXBkMbWMN3HpBsG13EWlP5s8,718
- lt_tensor/transform.py,sha256=IVAaQlq12OvMVhX3lX4lgsTCJYJce5n5MtMy7IK_AU4,8892
- lt_tensor/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lt_tensor/datasets/audio.py,sha256=BZTceP9MlmyrVioHpWLkd_ZcyawYYZUAlVWKfKwyWAg,3318
- lt_tensor/model_zoo/__init__.py,sha256=jipEk50_DTMQbGg8FnDDukxmh7Bcwvl_QVRS3rkb7aY,283
- lt_tensor/model_zoo/bsc.py,sha256=muxIR7dU-Pvf-HFE-iy3zmRb1sTJlcs1vqdlnbU1Hss,6307
- lt_tensor/model_zoo/disc.py,sha256=ND6JR_x6b2Y1VqxZejalv8Cz5_TO3H_Z-0x6UnACbBM,4740
- lt_tensor/model_zoo/fsn.py,sha256=5ySsg2OHjvTV_coPAdZQ0f7bz4ugJB8mDYsItmd61qA,2102
- lt_tensor/model_zoo/gns.py,sha256=Tirr_grONp_FFQ_L7K-zV2lvkaC39h8mMl4QDpx9vLQ,6028
- lt_tensor/model_zoo/istft.py,sha256=RV7KVY7q4CYzzsWXH4NGJQwSqrYWwHh-16Q62lKoA2k,3594
- lt_tensor/model_zoo/pos.py,sha256=N28v-rF8CELouYxQ9r45Jbd4ri5DNydwDgg7nzmQ4Ig,4471
- lt_tensor/model_zoo/rsd.py,sha256=5bba50g1Hm5kMexuJ4SwOIJuyQ1qJd8Acrq-Ax6CqE8,6958
- lt_tensor/model_zoo/tfrms.py,sha256=kauh-A13pk08SZ5OspEE5a-gPKD4rZr6tqMKWu3KGhk,4237
- lt_tensor-0.0.1a4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- lt_tensor-0.0.1a4.dist-info/METADATA,sha256=sbT9xduzE-huVvSjnak9iCo1Eyp45bsMUarc16oTD3o,968
- lt_tensor-0.0.1a4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lt_tensor-0.0.1a4.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
- lt_tensor-0.0.1a4.dist-info/RECORD,,