lt-tensor 0.0.1a34__py3-none-any.whl → 0.0.1a35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lt_tensor/model_zoo/convs.py

@@ -40,42 +40,22 @@ class ConvNets(Model):
 
     def remove_norms(self, name: str = "weight"):
         for module in self.modules():
-            if "Conv" in module.__class__.__name__:
-                remove_norm(module, name)
+            try:
+                if "Conv" in module.__class__.__name__:
+                    remove_norm(module, name)
+            except:
+                pass
 
     @staticmethod
-    def init_weights(
-        m: nn.Module,
-        norm: Optional[Literal["spectral", "weight"]] = None,
-        mean=0.0,
-        std=0.02,
-        name: str = "weight",
-        n_power_iterations: int = 1,
-        eps: float = 1e-9,
-        dim_sn: Optional[int] = None,
-        dim_wn: int = 0,
-    ):
+    def init_weights(m: nn.Module, mean=0.0, std=0.02):
         if "Conv" in m.__class__.__name__:
-            if norm is not None:
-                try:
-                    if norm == "spectral":
-                        m.apply(
-                            lambda m: spectral_norm(
-                                m,
-                                n_power_iterations=n_power_iterations,
-                                eps=eps,
-                                name=name,
-                                dim=dim_sn,
-                            )
-                        )
-                    else:
-                        m.apply(lambda m: weight_norm(m, name=name, dim=dim_wn))
-                except ValueError:
-                    pass
             m.weight.data.normal_(mean, std)
 
 
 class Conv1dEXT(ConvNets):
+
+    # TODO: Use this module to replace all that are using normalizations, mostly those in `audio_models`
+
     def __init__(
         self,
         in_channels: int,
@@ -90,7 +70,8 @@ class Conv1dEXT(ConvNets):
         device: Optional[Any] = None,
         dtype: Optional[Any] = None,
         apply_norm: Optional[Literal["weight", "spectral"]] = None,
-        activation: nn.Module = nn.Identity(),
+        activation_in: nn.Module = nn.Identity(),
+        activation_out: nn.Module = nn.Identity(),
         *args,
         **kwargs,
     ):
@@ -112,13 +93,21 @@ class Conv1dEXT(ConvNets):
         )
         if apply_norm is None:
             self.cnn = nn.Conv1d(**cnn_kwargs)
+            self.has_wn = False
         else:
+            self.has_wn = True
             if apply_norm == "spectral":
                 self.cnn = spectral_norm(nn.Conv1d(**cnn_kwargs))
             else:
                 self.cnn = weight_norm(nn.Conv1d(**cnn_kwargs))
-        self.activation = activation
+        self.actv_in = activation_in
+        self.actv_out = activation_out
         self.cnn.apply(self.init_weights)
 
     def forward(self, input: Tensor):
-        return self.cnn(self.activation(input))
+        return self.actv_out(self.cnn(self.actv_in(input)))
+
+    def remove_norms(self, name="weight"):
+        if self.has_wn:
+            remove_norm(self.cnn, name)
+            self.has_wn = False
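
The a35 Conv1dEXT splits the single pre-convolution activation into separate input and output hooks and gains a per-instance remove_norms override. A minimal usage sketch, assuming the class keeps nn.Conv1d's channel/kernel arguments and stays importable from lt_tensor.model_zoo.convs (the path given in this wheel's RECORD):

# Sketch only: parameter names come from the hunk above; the import path is
# inferred from this wheel's RECORD.
import torch
import torch.nn as nn
from lt_tensor.model_zoo.convs import Conv1dEXT

conv = Conv1dEXT(
    in_channels=1,
    out_channels=8,
    kernel_size=3,
    apply_norm="weight",              # wraps the inner nn.Conv1d in weight_norm
    activation_in=nn.LeakyReLU(0.1),  # applied before the convolution
    activation_out=nn.Tanh(),         # new in a35: applied after it
)
y = conv(torch.randn(2, 1, 128))      # forward is actv_out(cnn(actv_in(x)))
conv.remove_norms()                   # new override: strips weight_norm once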
lt_tensor/model_zoo/losses/discriminators.py

@@ -7,8 +7,6 @@ from lt_tensor.model_base import Model
 from lt_tensor.model_zoo.convs import ConvNets
 from torch.nn import functional as F
 from torchaudio import transforms as T
-from lt_tensor.processors import AudioProcessor, AudioProcessorConfig
-
 
 MULTI_DISC_OUT_TYPE: TypeAlias = Tuple[
     List[Tensor],
@@ -19,9 +17,11 @@ MULTI_DISC_OUT_TYPE: TypeAlias = Tuple[
 
 
 class MultiDiscriminatorWrapper(Model):
-    def __init__(self, list_discriminator: List["_MultiDiscriminatorT"]):
+    def __init__(
+        self, list_discriminator: Union[List["_MultiDiscriminatorT"], nn.ModuleList]
+    ):
         """Setup example:
-        model_d = MultiDiscriminatorStep(
+        model_d = MultiDiscriminatorWrapper(
             [
                 MultiEnvelopeDiscriminator(),
                 MultiBandDiscriminator(),
@@ -31,7 +31,12 @@ class MultiDiscriminatorWrapper(Model):
         )
         """
         super().__init__()
-        self.disc: Sequence[_MultiDiscriminatorT] = nn.ModuleList(list_discriminator)
+
+        self.disc: Sequence[_MultiDiscriminatorT] = (
+            nn.ModuleList(list_discriminator)
+            if isinstance(list_discriminator, (list, tuple, set))
+            else list_discriminator
+        )
         self.total = len(self.disc)
 
     def forward(
@@ -96,7 +101,6 @@ class _MultiDiscriminatorT(ConvNets):
     def forward(self, y: Tensor, y_hat: Tensor) -> MULTI_DISC_OUT_TYPE:
         pass
 
-    # for type hinting
     def __call__(self, *args, **kwds) -> MULTI_DISC_OUT_TYPE:
         return super().__call__(*args, **kwds)
 
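The wrapper now also accepts a prebuilt nn.ModuleList (for example, one shared with another module) in addition to a plain sequence. A short sketch:

# Both construction styles are accepted after this change (import path
# inferred from this wheel's RECORD).
from torch import nn
from lt_tensor.model_zoo.losses.discriminators import (
    MultiBandDiscriminator,
    MultiDiscriminatorWrapper,
    MultiEnvelopeDiscriminator,
)

model_d = MultiDiscriminatorWrapper(
    [MultiEnvelopeDiscriminator(), MultiBandDiscriminator()]
)  # a plain list/tuple/set is wrapped into an nn.ModuleList internally

shared = nn.ModuleList([MultiEnvelopeDiscriminator()])
model_d = MultiDiscriminatorWrapper(shared)  # a ModuleList is passed through as-is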
@@ -159,7 +163,7 @@ class DiscriminatorP(ConvNets):
     def __init__(
         self,
         period: List[int],
-        discriminator_channel_mult: Number = 1,
+        discriminator_channel_multi: Number = 1,
         kernel_size: int = 5,
         stride: int = 3,
         use_spectral_norm: bool = False,
@@ -167,7 +171,7 @@ class DiscriminatorP(ConvNets):
         super().__init__()
         self.period = period
         norm_f = weight_norm if not use_spectral_norm else spectral_norm
-        dsc = lambda x: int(x * discriminator_channel_mult)
+        dsc = lambda x: int(x * discriminator_channel_multi)
         self.convs = nn.ModuleList(
             [
                 norm_f(
@@ -242,19 +246,18 @@ class DiscriminatorP(ConvNets):
 class MultiPeriodDiscriminator(_MultiDiscriminatorT):
     def __init__(
         self,
-        discriminator_channel_mult: Number = 1,
+        discriminator_channel_multi: Number = 1,
         mpd_reshapes: list[int] = [2, 3, 5, 7, 11],
         use_spectral_norm: bool = False,
     ):
         super().__init__()
         self.mpd_reshapes = mpd_reshapes
-        print(f"mpd_reshapes: {self.mpd_reshapes}")
         self.discriminators = nn.ModuleList(
             [
                 DiscriminatorP(
                     rs,
                     use_spectral_norm=use_spectral_norm,
-                    discriminator_channel_mult=discriminator_channel_mult,
+                    discriminator_channel_multi=discriminator_channel_multi,
                 )
                 for rs in self.mpd_reshapes
             ]
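
Across this file, discriminator_channel_mult is renamed to discriminator_channel_multi, a breaking change for callers that pass the argument by keyword:

# Sketch: the a34 keyword no longer exists in a35 (import path inferred
# from this wheel's RECORD).
from lt_tensor.model_zoo.losses.discriminators import MultiPeriodDiscriminator

mpd = MultiPeriodDiscriminator(discriminator_channel_multi=0.5)  # a35 spelling
# mpd = MultiPeriodDiscriminator(discriminator_channel_mult=0.5)  # a34: now a TypeError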
@@ -276,6 +279,79 @@ class MultiPeriodDiscriminator(_MultiDiscriminatorT):
         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
 
 
+class DiscriminatorS(ConvNets):
+    def __init__(
+        self,
+        use_spectral_norm=False,
+        discriminator_channel_multi: Number = 1,
+    ):
+        super().__init__()
+        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+        dsc = lambda x: int(x * discriminator_channel_multi)
+        self.convs = nn.ModuleList(
+            [
+                norm_f(nn.Conv1d(1, dsc(128), 15, 1, padding=7)),
+                norm_f(nn.Conv1d(dsc(128), dsc(128), 41, 2, groups=4, padding=20)),
+                norm_f(nn.Conv1d(dsc(128), dsc(256), 41, 2, groups=16, padding=20)),
+                norm_f(nn.Conv1d(dsc(256), dsc(512), 41, 4, groups=16, padding=20)),
+                norm_f(nn.Conv1d(dsc(512), dsc(1024), 41, 4, groups=16, padding=20)),
+                norm_f(nn.Conv1d(dsc(1024), dsc(1024), 41, 1, groups=16, padding=20)),
+                norm_f(nn.Conv1d(dsc(1024), dsc(1024), 5, 1, padding=2)),
+            ]
+        )
+        self.conv_post = norm_f(nn.Conv1d(dsc(1024), 1, 3, 1, padding=1))
+        self.activation = nn.LeakyReLU(0.1)
+
+    def forward(self, x):
+        fmap = []
+        for l in self.convs:
+            x = l(x)
+            x = self.activation(x)
+            fmap.append(x)
+        x = self.conv_post(x)
+        fmap.append(x)
+        return x.flatten(1, -1), fmap
+
+
+class MultiScaleDiscriminator(ConvNets):
+    def __init__(
+        self,
+        discriminator_channel_multi: Number = 1,
+    ):
+        super().__init__()
+        self.discriminators = nn.ModuleList(
+            [
+                DiscriminatorS(
+                    use_spectral_norm=True,
+                    discriminator_channel_multi=discriminator_channel_multi,
+                ),
+                DiscriminatorS(discriminator_channel_multi=discriminator_channel_multi),
+                DiscriminatorS(discriminator_channel_multi=discriminator_channel_multi),
+            ]
+        )
+        self.meanpools = nn.ModuleList(
+            [nn.AvgPool1d(4, 2, padding=2), nn.AvgPool1d(4, 2, padding=2)]
+        )
+
+    def forward(self, y, y_hat):
+        y_d_rs = []
+        y_d_gs = []
+        fmap_rs = []
+        fmap_gs = []
+        for i, d in enumerate(self.discriminators):
+            if i > 0:
+                y = self.meanpools[i - 1](y)
+                y_hat = self.meanpools[i - 1](y_hat)
+            y_d_r, fmap_r = d(y)
+            y_d_g, fmap_g = d(y_hat)
+            y_d_rs.append(y_d_r)
+            fmap_rs.append(fmap_r)
+            y_d_gs.append(y_d_g)
+            fmap_gs.append(fmap_g)
+
+        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
 class EnvelopeExtractor(Model):
     """Extracts the amplitude envelope of the audio signal."""
 
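The newly added classes reproduce the HiFi-GAN-style multi-scale discriminator: three DiscriminatorS stacks, with the second and third fed average-pooled audio. A shape sketch, assuming mono waveforms shaped (batch, 1, time) as elsewhere in this file:

# Sketch: one (logits, feature map) pair per scale; import path inferred
# from this wheel's RECORD.
import torch
from lt_tensor.model_zoo.losses.discriminators import MultiScaleDiscriminator

msd = MultiScaleDiscriminator(discriminator_channel_multi=1)
y, y_hat = torch.randn(4, 1, 8192), torch.randn(4, 1, 8192)
y_d_rs, y_d_gs, fmap_rs, fmap_gs = msd(y, y_hat)
assert len(y_d_rs) == len(fmap_rs) == 3  # scales: raw, pooled once, pooled twice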
@@ -297,21 +373,35 @@ class EnvelopeExtractor(Model):
 
 
 class DiscriminatorEnvelope(ConvNets):
-    def __init__(self, use_spectral_norm=False):
+    def __init__(
+        self,
+        use_spectral_norm=False,
+        discriminator_channel_multi: Number = 1,
+        kernel_size: int = 101,
+    ):
         super().__init__()
         norm_f = weight_norm if not use_spectral_norm else spectral_norm
-        self.extractor = EnvelopeExtractor(kernel_size=101)
+        self.extractor = EnvelopeExtractor(kernel_size=kernel_size)
+        dsc = lambda x: int(x * discriminator_channel_multi)
         self.convs = nn.ModuleList(
             [
-                norm_f(nn.Conv1d(1, 64, 15, stride=1, padding=7)),
-                norm_f(nn.Conv1d(64, 128, 41, stride=2, groups=4, padding=20)),
-                norm_f(nn.Conv1d(128, 256, 41, stride=2, groups=16, padding=20)),
-                norm_f(nn.Conv1d(256, 512, 41, stride=4, groups=16, padding=20)),
-                norm_f(nn.Conv1d(512, 512, 41, stride=4, groups=16, padding=20)),
-                norm_f(nn.Conv1d(512, 512, 5, stride=1, padding=2)),
+                norm_f(nn.Conv1d(1, dsc(64), 15, stride=1, padding=7)),
+                norm_f(
+                    nn.Conv1d(dsc(64), dsc(128), 41, stride=2, groups=4, padding=20)
+                ),
+                norm_f(
+                    nn.Conv1d(dsc(128), dsc(256), 41, stride=2, groups=16, padding=20)
+                ),
+                norm_f(
+                    nn.Conv1d(dsc(256), dsc(512), 41, stride=4, groups=16, padding=20)
+                ),
+                norm_f(
+                    nn.Conv1d(dsc(512), dsc(512), 41, stride=4, groups=16, padding=20)
+                ),
+                norm_f(nn.Conv1d(dsc(512), dsc(512), 5, stride=1, padding=2)),
             ]
         )
-        self.conv_post = norm_f(nn.Conv1d(512, 1, 3, stride=1, padding=1))
+        self.conv_post = norm_f(nn.Conv1d(dsc(512), 1, 3, stride=1, padding=1))
         self.activation = nn.LeakyReLU(0.1)
 
     def forward(self, x):
@@ -327,11 +417,17 @@ class DiscriminatorEnvelope(ConvNets):
 
 
 class MultiEnvelopeDiscriminator(_MultiDiscriminatorT):
-    def __init__(self, use_spectral_norm: bool = False):
+    def __init__(
+        self,
+        use_spectral_norm: bool = False,
+        discriminator_channel_multi: Number = 1,
+    ):
         super().__init__()
         self.discriminators = nn.ModuleList(
             [
-                DiscriminatorEnvelope(use_spectral_norm),  # raw envelope
+                DiscriminatorEnvelope(
+                    use_spectral_norm, discriminator_channel_multi
+                ),  # raw envelope
                 DiscriminatorEnvelope(use_spectral_norm),  # downsampled once
                 DiscriminatorEnvelope(use_spectral_norm),  # downsampled twice
             ]
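
The dsc helper multiplies each channel width by discriminator_channel_multi and truncates to int, so a fractional multiplier thins the whole stack; a worked example:

# Worked example of the channel scaling used throughout these classes.
dsc = lambda x: int(x * 0.5)  # discriminator_channel_multi = 0.5
print([dsc(c) for c in (64, 128, 256, 512)])  # -> [32, 64, 128, 256]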
@@ -431,7 +527,7 @@ class DiscriminatorB(ConvNets):
         for band, stack in zip(x_bands, self.band_convs):
             for i, layer in enumerate(stack):
                 band = layer(band)
-                band = torch.nn.functional.leaky_relu(band, 0.1)
+                band = F.leaky_relu(band, 0.1)
                 if i > 0:
                     fmap.append(band)
             x.append(band)
@@ -452,11 +548,21 @@ class MultiBandDiscriminator(_MultiDiscriminatorT):
     def __init__(
         self,
         mbd_fft_sizes: list[int] = [2048, 1024, 512],
+        channels: int = 32,
+        hop_factor: float = 0.25,
+        bands: Tuple[Tuple[float, float], ...] = (
+            (0.0, 0.1),
+            (0.1, 0.25),
+            (0.25, 0.5),
+            (0.5, 0.75),
+            (0.75, 1.0),
+        ),
     ):
         super().__init__()
         self.fft_sizes = mbd_fft_sizes
+        kwargs_disc = dict(channels=channels, hop_factor=hop_factor, bands=bands)
         self.discriminators = nn.ModuleList(
-            [DiscriminatorB(window_length=w) for w in self.fft_sizes]
+            [DiscriminatorB(window_length=w, **kwargs_disc) for w in self.fft_sizes]
         )
 
     def forward(self, y: Tensor, y_hat: Tensor) -> MULTI_DISC_OUT_TYPE:
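DiscriminatorB's band-splitting settings are now exposed on the wrapper and forwarded to every sub-discriminator. A construction sketch; the band edges are fractions of the frequency axis, and the hop_factor semantics are an assumption here:

# Sketch: overriding the newly exposed defaults (import path inferred from
# this wheel's RECORD; hop_factor presumably sets the STFT hop as a
# fraction of window_length).
from lt_tensor.model_zoo.losses.discriminators import MultiBandDiscriminator

mbd = MultiBandDiscriminator(
    mbd_fft_sizes=[2048, 1024, 512],
    channels=32,
    hop_factor=0.25,
    bands=((0.0, 0.25), (0.25, 0.5), (0.5, 1.0)),  # fractions of the spectrum
)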
@@ -483,7 +589,7 @@ class DiscriminatorR(ConvNets):
         self,
         resolution: List[int],
         use_spectral_norm: bool = False,
-        discriminator_channel_mult: int = 1,
+        discriminator_channel_multi: Number = 1,
     ):
         super().__init__()
 
@@ -501,13 +607,13 @@ class DiscriminatorR(ConvNets):
             [
                 norm_f(
                     nn.Conv2d(
-                        1, int(32 * discriminator_channel_mult), (3, 9), padding=(1, 4)
+                        1, int(32 * discriminator_channel_multi), (3, 9), padding=(1, 4)
                     )
                 ),
                 norm_f(
                     nn.Conv2d(
-                        int(32 * discriminator_channel_mult),
-                        int(32 * discriminator_channel_mult),
+                        int(32 * discriminator_channel_multi),
+                        int(32 * discriminator_channel_multi),
                         (3, 9),
                         stride=(1, 2),
                         padding=(1, 4),
@@ -515,8 +621,8 @@
                 ),
                 norm_f(
                     nn.Conv2d(
-                        int(32 * discriminator_channel_mult),
-                        int(32 * discriminator_channel_mult),
+                        int(32 * discriminator_channel_multi),
+                        int(32 * discriminator_channel_multi),
                         (3, 9),
                         stride=(1, 2),
                         padding=(1, 4),
@@ -524,8 +630,8 @@
                 ),
                 norm_f(
                     nn.Conv2d(
-                        int(32 * discriminator_channel_mult),
-                        int(32 * discriminator_channel_mult),
+                        int(32 * discriminator_channel_multi),
+                        int(32 * discriminator_channel_multi),
                         (3, 9),
                         stride=(1, 2),
                         padding=(1, 4),
@@ -533,8 +639,8 @@
                 ),
                 norm_f(
                     nn.Conv2d(
-                        int(32 * discriminator_channel_mult),
-                        int(32 * discriminator_channel_mult),
+                        int(32 * discriminator_channel_multi),
+                        int(32 * discriminator_channel_multi),
                         (3, 3),
                         padding=(1, 1),
                     )
@@ -542,7 +648,7 @@
             ]
         )
         self.conv_post = norm_f(
-            nn.Conv2d(int(32 * discriminator_channel_mult), 1, (3, 3), padding=(1, 1))
+            nn.Conv2d(int(32 * discriminator_channel_multi), 1, (3, 3), padding=(1, 1))
         )
 
     def forward(self, x: Tensor) -> Tuple[Tensor, List[Tensor]]:
@@ -586,7 +692,7 @@ class MultiResolutionDiscriminator(_MultiDiscriminatorT):
     def __init__(
         self,
         use_spectral_norm: bool = False,
-        discriminator_channel_mult: int = 1,
+        discriminator_channel_multi: Number = 1,
         resolutions: List[List[int]] = [
             [1024, 120, 600],
             [2048, 240, 1200],
@@ -601,7 +707,7 @@ class MultiResolutionDiscriminator(_MultiDiscriminatorT):
         self.discriminators = nn.ModuleList(
             [
                 DiscriminatorR(
-                    resolution, use_spectral_norm, discriminator_channel_mult
+                    resolution, use_spectral_norm, discriminator_channel_multi
                 )
                 for resolution in self.resolutions
            ]
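
Each resolution triple builds one DiscriminatorR; by the UnivNet/BigVGAN convention these are presumably [n_fft, hop_length, win_length] for the underlying STFT (an assumption, not stated in this diff). A construction sketch:

# Sketch: one DiscriminatorR per triple (import path inferred from this
# wheel's RECORD; triples taken from the defaults shown above).
from lt_tensor.model_zoo.losses.discriminators import MultiResolutionDiscriminator

mrd = MultiResolutionDiscriminator(
    discriminator_channel_multi=1,
    resolutions=[[1024, 120, 600], [2048, 240, 1200]],
)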
lt_tensor-0.0.1a35.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lt-tensor
-Version: 0.0.1a34
+Version: 0.0.1a35
 Summary: General utilities for PyTorch and others. Built for general use.
 Home-page: https://github.com/gr1336/lt-tensor/
 Author: gr1336
lt_tensor-0.0.1a35.dist-info/RECORD (added)

@@ -0,0 +1,40 @@
+lt_tensor/__init__.py,sha256=4NqhrI_O5q4YQMBpyoLtNUUbBnnbWkO92GE1hxHcrd8,441
+lt_tensor/config_templates.py,sha256=F9UvL8paAjkSvio890kp8WznpYeI50pYnm9iqQroBxk,2797
+lt_tensor/losses.py,sha256=Heco_WyoC1HkNkcJEircOAzS9umusATHiNAG-FKGyzc,8918
+lt_tensor/lr_schedulers.py,sha256=6_vcfaPHrozfH3wvmNEdKSFYl6iTIijYoHL8vuG-45U,7651
+lt_tensor/math_ops.py,sha256=ahX6Z1Mt3X-FhmwSZYZea5mB1B0S8GDuvKPfAm5e_FQ,2646
+lt_tensor/misc_utils.py,sha256=stL6q3M7S2N4FBICFYbgYpdPDrJRlwmr24-iCXMRifM,28933
+lt_tensor/model_base.py,sha256=5T4dbAh4MXbQmPRpihGtMYwTY8sJTQOhY6An3VboM58,18086
+lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
+lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
+lt_tensor/torch_commons.py,sha256=8l0bxmrAzwvyqjivCIVISXlbvKarlg4DdE0BOGSnMuQ,812
+lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
+lt_tensor/model_zoo/__init__.py,sha256=yPUVchgVhU2nAJ2ocA4HFfG7IMEiBu8qOi8I1KWTTkU,404
+lt_tensor/model_zoo/basic.py,sha256=pI8HyiHK-cmWcEEaVY_EduUJOjZW6HOtXvJd8Rbhq30,15452
+lt_tensor/model_zoo/convs.py,sha256=Tws0jrPfs9m7OLmJ30W0AfkAvZgppW7lNi4xt0e-qRU,3518
+lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
+lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
+lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
+lt_tensor/model_zoo/residual.py,sha256=tMXgif9Ggep9bk75K93yueeU5vk5S25AGCRFwOQOyB8,6452
+lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
+lt_tensor/model_zoo/activations/__init__.py,sha256=f_IsuC-SaFsX6w4OtBWa5bbS4TqR90X-cvLxGUgYfjk,67
+lt_tensor/model_zoo/activations/alias_free/__init__.py,sha256=dgLjatRm9nusoPVOl1pvCef5rZsaRfS3BJUs05SPYzw,64
+lt_tensor/model_zoo/activations/alias_free/act.py,sha256=1wxmab2kMD88L6wsQgf3t25dBwR7_he2eM1DlV0FQak,1424
+lt_tensor/model_zoo/activations/alias_free/filter.py,sha256=5TvXESv31toD5sePBe_OUJJfMXv6Ohwmx2YawjQL-pk,6004
+lt_tensor/model_zoo/activations/alias_free/resample.py,sha256=3iM4fNr9fLNXXMyXvzW-MwkSjOZOrMZLfS80UHs6zk0,3386
+lt_tensor/model_zoo/activations/snake/__init__.py,sha256=AtOAbJuMinxmKkppITGMzRbcbPQaALnl9mCtl1c3x0Q,4356
+lt_tensor/model_zoo/audio_models/__init__.py,sha256=WwiP9MekJreMOfKPWLl24VkRJIpLk6hhL8ch0aKgOss,103
+lt_tensor/model_zoo/audio_models/resblocks.py,sha256=u-foHxaFDUICjxSkpyHXljQYQG9zMxVYaOGqLR_nJ-k,7978
+lt_tensor/model_zoo/audio_models/bigvgan/__init__.py,sha256=Dpt_3JXUToldxQrZx4a1gfI-awsLIVipAXqWm4lzBzM,8495
+lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=PDuDYN1omD1RoAXcmxH3tEgfAuM3ZHAWzimD6ElMqEQ,9073
+lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=3HVfEreQ4NqYIC9AWEkmL4ePcIbR1kTyH0cBG8u_Jik,6387
+lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=blICjLX_z_IFmR3_TCz_dJiSayLYGza9eG6fd9aKyvE,7448
+lt_tensor/model_zoo/losses/__init__.py,sha256=B9RAUxBiOZwooztnij1oLeRwZ7_MjnN3mPoum7saD6s,59
+lt_tensor/model_zoo/losses/discriminators.py,sha256=HBO7jwCsUGsYfSz-JZPZccuYLnto6jfZs3Ve5j51JQE,24247
+lt_tensor/processors/__init__.py,sha256=Pvxhh0KR65zLCgUd53_k5Z0y5JWWcO0ZBXFK9rv0o5w,109
+lt_tensor/processors/audio.py,sha256=HNr1GS-6M2q0Rda4cErf5y2Jlc9f4jD58FvpX2ua9d4,18369
+lt_tensor-0.0.1a35.dist-info/licenses/LICENSE,sha256=TbiyJWLgNqqgqhfCnrGwFIxy7EqGNrIZZcKhHrefcuU,11354
+lt_tensor-0.0.1a35.dist-info/METADATA,sha256=0FrtLNnbU49bKOlyshasXPZOZ90Sok03XkXbtxP4VMI,1062
+lt_tensor-0.0.1a35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lt_tensor-0.0.1a35.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+lt_tensor-0.0.1a35.dist-info/RECORD,,
lt_tensor/model_zoo/activations/alias_free_torch/__init__.py (removed)

@@ -1 +0,0 @@
-from . import *
lt_tensor-0.0.1a34.dist-info/RECORD (removed)

@@ -1,37 +0,0 @@
-lt_tensor/__init__.py,sha256=WAGPuMPq5c4DGAJ57x1Ykgzg3vMlLq9BiWk5EdJcUsU,441
-lt_tensor/config_templates.py,sha256=F9UvL8paAjkSvio890kp8WznpYeI50pYnm9iqQroBxk,2797
-lt_tensor/losses.py,sha256=fHVMqOFo3ekjORYy89R_aRjmtT6lo27Z1egzOYjQ1W8,8646
-lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
-lt_tensor/math_ops.py,sha256=ahX6Z1Mt3X-FhmwSZYZea5mB1B0S8GDuvKPfAm5e_FQ,2646
-lt_tensor/misc_utils.py,sha256=N2r3UmxC4RM2BZBQhpjDZ_BKLrzsyIlKzopTzJbnjFU,28962
-lt_tensor/model_base.py,sha256=5T4dbAh4MXbQmPRpihGtMYwTY8sJTQOhY6An3VboM58,18086
-lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
-lt_tensor/noise_tools.py,sha256=wFeAsHhLhSlEc5XU5LbFKaXoHeVxrWjiMeljjGdIKyM,11363
-lt_tensor/torch_commons.py,sha256=8l0bxmrAzwvyqjivCIVISXlbvKarlg4DdE0BOGSnMuQ,812
-lt_tensor/transform.py,sha256=dZm8T_ov0blHMQu6nGiehsdG1VSB7bZBUVmTkT-PBdc,13257
-lt_tensor/model_zoo/__init__.py,sha256=yPUVchgVhU2nAJ2ocA4HFfG7IMEiBu8qOi8I1KWTTkU,404
-lt_tensor/model_zoo/basic.py,sha256=pI8HyiHK-cmWcEEaVY_EduUJOjZW6HOtXvJd8Rbhq30,15452
-lt_tensor/model_zoo/convs.py,sha256=YQRxek75Qpsha8nfc7wLhmJS9XxPeCa4WxuftLg6IcE,3927
-lt_tensor/model_zoo/features.py,sha256=DO8dlE0kmPKTNC1Xkv9wKegOOYkQa_rkxM4hhcNwJWA,15655
-lt_tensor/model_zoo/fusion.py,sha256=usC1bcjQRNivDc8xzkIS5T1glm78OLcs2V_tPqfp-eI,5422
-lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmhw_UYM,4487
-lt_tensor/model_zoo/residual.py,sha256=tMXgif9Ggep9bk75K93yueeU5vk5S25AGCRFwOQOyB8,6452
-lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
-lt_tensor/model_zoo/activations/alias_free_torch/__init__.py,sha256=ovguP4wzQEDNguczwiZnhMm4dRRVcvnzmHrfQtlRCNQ,15
-lt_tensor/model_zoo/activations/alias_free_torch/act.py,sha256=h79C93GzbSrCq4ui6iO7DjJLuJ7QK_ag_TU-WAcj0NI,1405
-lt_tensor/model_zoo/activations/alias_free_torch/filter.py,sha256=5TvXESv31toD5sePBe_OUJJfMXv6Ohwmx2YawjQL-pk,6004
-lt_tensor/model_zoo/activations/alias_free_torch/resample.py,sha256=3iM4fNr9fLNXXMyXvzW-MwkSjOZOrMZLfS80UHs6zk0,3386
-lt_tensor/model_zoo/activations/snake/__init__.py,sha256=Adb_xe-7YdYsNxvlSSO9zkae-cu7ElxkBKE3trDtOus,4517
-lt_tensor/model_zoo/audio_models/__init__.py,sha256=MoG9YjxLyvscq_6njK1ljGBletK9iedBXt66bplzW-s,83
-lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=PDuDYN1omD1RoAXcmxH3tEgfAuM3ZHAWzimD6ElMqEQ,9073
-lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=7GJqKLw7-juXpfp5IFzjASLut0uouDhjZ1CQknf3H68,16533
-lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=ltIuD9t1gmS3bTmCqZIwJHKrhC6DYya3OaXlskWX9kw,17606
-lt_tensor/model_zoo/losses/__init__.py,sha256=B9RAUxBiOZwooztnij1oLeRwZ7_MjnN3mPoum7saD6s,59
-lt_tensor/model_zoo/losses/discriminators.py,sha256=ZpyByFgc7L7uV_XRBsV9vkdVItbJO3z--Y6LlvTvtwY,20765
-lt_tensor/processors/__init__.py,sha256=Pvxhh0KR65zLCgUd53_k5Z0y5JWWcO0ZBXFK9rv0o5w,109
-lt_tensor/processors/audio.py,sha256=HNr1GS-6M2q0Rda4cErf5y2Jlc9f4jD58FvpX2ua9d4,18369
-lt_tensor-0.0.1a34.dist-info/licenses/LICENSE,sha256=TbiyJWLgNqqgqhfCnrGwFIxy7EqGNrIZZcKhHrefcuU,11354
-lt_tensor-0.0.1a34.dist-info/METADATA,sha256=WkTafcY5nYZbrZ7WzUc3JXnmg9NtUAXrchx42dCok9I,1062
-lt_tensor-0.0.1a34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lt_tensor-0.0.1a34.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
-lt_tensor-0.0.1a34.dist-info/RECORD,,