lt-tensor 0.0.1a18__py3-none-any.whl → 0.0.1a19__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two published versions.
--- a/lt_tensor/config_templates.py
+++ b/lt_tensor/config_templates.py
@@ -7,75 +7,79 @@ from lt_tensor.misc_utils import updateDict
 
 class ModelConfig(ABC, OrderedDict):
     _default_settings: Dict[str, Any] = {}
-    _forbidden_list: List[str] = ["_default_settings", "_forbidden_list" "path_name"]
+    _forbidden_list: List[str] = [
+        "_default_settings",
+        "_forbidden_list",
+        "path_name",
+    ]
+    path: Optional[str] = None
 
     def __init__(
         self,
-        settings: Dict[str, Any] = {},
-        path_name: Optional[Union[str, PathLike]] = None,
+        path: Optional[Union[str, PathLike]] = None,
+        **settings,
     ):
-        assert is_dict(settings, False)
-        self._default_settings = settings
-        if path_name is not None and is_pathlike(path_name):
-            if not str(path_name).endswith(".json"):
-                self.path_name = str(Path(path_name, "config.json")).replace("\\", "/")
-            else:
-                self.path_name = str(path_name).replace("\\", "/")
+        self._setup_path_name(path)
+        if self.path is not None:
+            self._default_settings = load_json(self.path, default=settings)
         else:
-            self.path_name = "config.json"
-        self.reset_settings()
+            self._default_settings = settings
+
+        self.set_state_dict(self._default_settings)
 
     def _setup_path_name(self, path_name: Union[str, PathLike]):
         if is_file(path_name):
             self.from_path(path_name)
-            self.path_name = str(path_name).replace("\\", "/")
+            self.path = str(path_name).replace("\\", "/")
         elif is_str(path_name):
-            self.path_name = str(path_name).replace("\\", "/")
-            if not self.path_name.endswith((".json")):
-                self.path_name += ".json"
+            self.path = str(path_name).replace("\\", "/")
+            if not self.path.endswith((".json")):
+                self.path += ".json"
 
     def reset_settings(self):
-        dk_keys = self.__dict__.keys()
-        for s_name, setting in self._default_settings.items():
-            if s_name in self._forbidden_list or s_name not in dk_keys:
-                continue
-            updateDict(self, {s_name: setting})
+        raise NotImplementedError("Not implemented")
 
     def save_config(
         self,
-        path_name: Optional[Union[PathLike, str]] = None,
+        path: Optional[Union[PathLike, str]] = None,
     ):
-        if not is_pathlike(path_name, True):
+        if not is_pathlike(path, True):
             assert (
-                path_name is None
-            ), f"path_name should be a non-empty string or pathlike object! received instead: {path_name}."
-            path_name = self.path_name
+                path is None
+            ), f"path_name should be a non-empty string or pathlike object! received instead: {path}."
+            path = self.path
         else:
-            self._setup_path_name(path_name)
+            self._setup_path_name(path)
 
-        base = self.get_state_dict()
-        save_json(self.path_name, base, indent=2)
+        base = self.state_dict()
+        save_json(self.path, base, indent=2)
 
     def set_value(self, var_name: str, value: str) -> None:
-        assert var_name in self.__dict__, "Value not registered!"
         assert var_name not in self._forbidden_list, "Not allowed!"
         updateDict(self, {var_name: value})
+        self.update({var_name: value})
 
     def get_value(self, var_name: str) -> Any:
         return self.__dict__.get(var_name)
 
-    def __getattribute__(self, name):
-        return self.__dict__.get(name)
+    def set_state_dict(self, new_state: dict[str, str]):
+        new_state = {
+            k: y for k, y in new_state.items() if k not in self._forbidden_list
+        }
+        updateDict(self, new_state)
+        self.update(**new_state)
 
-    def get_state_dict(self):
+    def state_dict(self):
         return {k: y for k, y in self.__dict__.items() if k not in self._forbidden_list}
 
     @classmethod
     def from_dict(
-        cls, dictionary: Dict[str, Any], path: Optional[Union[str, PathLike]] = None
+        cls,
+        dictionary: Dict[str, Any],
+        path: Optional[Union[str, PathLike]] = None,
     ) -> "ModelConfig":
         assert is_dict(dictionary)
-        return ModelConfig(dictionary, path)
+        return ModelConfig(path, **dictionary)
 
     @classmethod
     def from_path(cls, path_name: PathLike) -> "ModelConfig":
@@ -102,4 +106,4 @@ class ModelConfig(ABC, OrderedDict):
         )
         assert files, "No config file found in the provided directory!"
         settings.update(load_json(files[-1], {}, errors="ignore"))
-        return ModelConfig(settings, path_name)
+        return ModelConfig(path_name, **settings)
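
Note: the config_templates.py changes above rework the ModelConfig API: the constructor now takes an optional path followed by keyword settings (instead of a settings dict plus path_name), path_name becomes path, get_state_dict() becomes state_dict(), and reset_settings() now raises NotImplementedError. A minimal migration sketch, with a hypothetical config path and setting name:

    from lt_tensor.config_templates import ModelConfig

    # 0.0.1a18 style: ModelConfig({"n_mels": 80}, "exp/config.json")
    # 0.0.1a19 style: path first (optional), settings as keyword arguments
    cfg = ModelConfig("exp/config.json", n_mels=80)  # path and n_mels are illustrative
    cfg.set_value("n_mels", 100)   # now also mirrored into the OrderedDict view
    state = cfg.state_dict()       # renamed from get_state_dict()
    cfg.save_config()              # writes JSON to cfg.path
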
--- a/lt_tensor/model_zoo/audio_models/diffwave/__init__.py
+++ b/lt_tensor/model_zoo/audio_models/diffwave/__init__.py
@@ -1,9 +1,8 @@
 __all__ = ["DiffWave", "DiffWaveConfig", "SpectrogramUpsample", "DiffusionEmbedding"]
 
 import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
+from lt_tensor.torch_commons import *
+from torch.nn import functional as F
 from lt_tensor.config_templates import ModelConfig
 from lt_tensor.torch_commons import *
 from lt_tensor.model_base import Model
@@ -12,16 +11,9 @@ from lt_utils.common import *
 
 
 class DiffWaveConfig(ModelConfig):
-    # Training params
-    batch_size = 16
-    learning_rate = 2e-4
-    max_grad_norm = None
-    # Data params
-    sample_rate = 24000
+    # Model params
     n_mels = 80
-    n_fft = 1024
     hop_samples = 256
-    # Model params
     residual_layers = 30
     residual_channels = 64
     dilation_cycle_length = 10
@@ -35,10 +27,30 @@ class DiffWaveConfig(ModelConfig):
 
     def __init__(
         self,
-        settings: Dict[str, Any] = {},
-        path_name: Optional[Union[str, PathLike]] = None,
+        n_mels = 80,
+        hop_samples = 256,
+        residual_layers = 30,
+        residual_channels = 64,
+        dilation_cycle_length = 10,
+        unconditional = False,
+        noise_schedule: list[int] = np.linspace(1e-4, 0.05, 50).tolist(),
+        interpolate_cond = False,
+        interpolation_mode: Literal[
+            "nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"
+        ] = "nearest",
     ):
-        super().__init__(settings, path_name)
+        settings = {
+            "n_mels": n_mels,
+            "hop_samples": hop_samples,
+            "residual_layers": residual_layers,
+            "dilation_cycle_length": dilation_cycle_length,
+            "residual_channels": residual_channels,
+            "unconditional": unconditional,
+            "noise_schedule": noise_schedule,
+            "interpolate": interpolate_cond,
+            "interpolation_mode": interpolation_mode,
+        }
+        super().__init__(**settings)
 
 
 def Conv1d(*args, **kwargs):
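
Note: DiffWaveConfig drops the training/data fields (batch_size, learning_rate, max_grad_norm, sample_rate, n_fft) and switches from a settings dict to explicit keyword arguments forwarded to ModelConfig. A short usage sketch with illustrative values:

    from lt_tensor.model_zoo.audio_models.diffwave import DiffWaveConfig

    # 0.0.1a18: DiffWaveConfig({"n_mels": 100, "residual_layers": 36})
    # 0.0.1a19: explicit keyword arguments
    cfg = DiffWaveConfig(n_mels=100, residual_layers=36, interpolate_cond=True)
    # note: interpolate_cond is stored under the "interpolate" key
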
--- a/lt_tensor/model_zoo/audio_models/hifigan/__init__.py
+++ b/lt_tensor/model_zoo/audio_models/hifigan/__init__.py
@@ -4,10 +4,6 @@ from lt_tensor.torch_commons import *
 from lt_tensor.model_zoo.residual import ConvNets
 from torch.nn import functional as F
 
-import torch
-import torch.nn.functional as F
-import torch.nn as nn
-
 
 def get_padding(kernel_size, dilation=1):
     return int((kernel_size * dilation - dilation) / 2)
@@ -21,7 +17,7 @@ class HifiganConfig(ModelConfig):
     in_channels: int = 80
     upsample_rates: List[Union[int, List[int]]] = [8, 8]
     upsample_kernel_sizes: List[Union[int, List[int]]] = [16, 16]
-    upsample_initial_channel: int = (512,)
+    upsample_initial_channel: int = 512
     resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11]
     resblock_dilation_sizes: List[Union[int, List[int]]] = [
         [1, 3, 5],
@@ -34,10 +30,32 @@ class HifiganConfig(ModelConfig):
 
     def __init__(
         self,
-        settings: Dict[str, Any] = {},
-        path_name: Optional[Union[str, PathLike]] = None,
+        in_channels: int = 80,
+        upsample_rates: List[Union[int, List[int]]] = [8, 8],
+        upsample_kernel_sizes: List[Union[int, List[int]]] = [16, 16],
+        upsample_initial_channel: int = 512,
+        resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11],
+        resblock_dilation_sizes: List[Union[int, List[int]]] = [
+            [1, 3, 5],
+            [1, 3, 5],
+            [1, 3, 5],
+        ],
+        activation: nn.Module = nn.LeakyReLU(0.1),
+        resblock: int = 0,
+        *args,
+        **kwargs,
     ):
-        super().__init__(settings, path_name)
+        settings = {
+            "in_channels": in_channels,
+            "upsample_rates": upsample_rates,
+            "upsample_kernel_sizes": upsample_kernel_sizes,
+            "upsample_initial_channel": upsample_initial_channel,
+            "resblock_kernel_sizes": resblock_kernel_sizes,
+            "resblock_dilation_sizes": resblock_dilation_sizes,
+            "activation": activation,
+            "resblock": resblock,
+        }
+        super().__init__(**settings)
 
 
 class ResBlock1(ConvNets):
@@ -177,10 +195,10 @@ class HifiganGenerator(ConvNets):
         self.conv_pre = weight_norm(
             nn.Conv1d(cfg.in_channels, cfg.upsample_initial_channel, 7, 1, padding=3)
         )
-        resblock = ResBlock1 if resblock == 0 else ResBlock2
+        resblock = ResBlock1 if cfg.resblock == 0 else ResBlock2
         self.activation = cfg.activation
         self.ups = nn.ModuleList()
-        for i, (u, k) in enumerate(zip(cfg.psample_rates, cfg.upsample_kernel_sizes)):
+        for i, (u, k) in enumerate(zip(cfg.upsample_rates, cfg.upsample_kernel_sizes)):
             self.ups.append(
                 weight_norm(
                     nn.ConvTranspose1d(
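
Note: HifiganConfig gains the same keyword-argument constructor, the upsample_initial_channel annotation is corrected from the tuple (512,) to 512, and HifiganGenerator now reads resblock and upsample_rates from the config (fixing the previously undefined resblock name and the cfg.psample_rates typo). A config-only sketch with illustrative values:

    from lt_tensor.model_zoo.audio_models.hifigan import HifiganConfig

    cfg = HifiganConfig(
        in_channels=80,
        upsample_rates=[8, 8],
        upsample_kernel_sizes=[16, 16],
        upsample_initial_channel=512,
        resblock=0,  # 0 selects ResBlock1 in the generator, anything else ResBlock2
    )
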
--- a/lt_tensor/model_zoo/audio_models/istft/__init__.py
+++ b/lt_tensor/model_zoo/audio_models/istft/__init__.py
@@ -11,7 +11,7 @@ class iSTFTNetConfig(ModelConfig):
     in_channels: int = 80
     upsample_rates: List[Union[int, List[int]]] = [8, 8]
     upsample_kernel_sizes: List[Union[int, List[int]]] = [16, 16]
-    upsample_initial_channel: int = (512,)
+    upsample_initial_channel: int = 512
     resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11]
     resblock_dilation_sizes: List[Union[int, List[int]]] = [
         [1, 3, 5],
@@ -26,10 +26,36 @@ class iSTFTNetConfig(ModelConfig):
 
     def __init__(
         self,
-        settings: Dict[str, Any] = {},
-        path_name: Optional[Union[str, PathLike]] = None,
+        in_channels: int = 80,
+        upsample_rates: List[Union[int, List[int]]] = [8, 8],
+        upsample_kernel_sizes: List[Union[int, List[int]]] = [16, 16],
+        upsample_initial_channel: int = 512,
+        resblock_kernel_sizes: List[Union[int, List[int]]] = [3, 7, 11],
+        resblock_dilation_sizes: List[Union[int, List[int]]] = [
+            [1, 3, 5],
+            [1, 3, 5],
+            [1, 3, 5],
+        ],
+        activation: nn.Module = nn.LeakyReLU(0.1),
+        resblock: int = 0,
+        gen_istft_n_fft: int = 16,
+        sampling_rate: Number = 24000,
+        *args,
+        **kwargs,
     ):
-        super().__init__(settings, path_name)
+        settings = {
+            "in_channels": in_channels,
+            "upsample_rates": upsample_rates,
+            "upsample_kernel_sizes": upsample_kernel_sizes,
+            "upsample_initial_channel": upsample_initial_channel,
+            "resblock_kernel_sizes": resblock_kernel_sizes,
+            "resblock_dilation_sizes": resblock_dilation_sizes,
+            "activation": activation,
+            "resblock": resblock,
+            "gen_istft_n_fft": gen_istft_n_fft,
+            "sampling_rate": sampling_rate,
+        }
+        super().__init__(**settings)
 
 
 def get_padding(ks, d):
@@ -169,7 +195,7 @@ class iSTFTNetGenerator(ConvNets):
         self.conv_pre = weight_norm(
             nn.Conv1d(cfg.in_channels, cfg.upsample_initial_channel, 7, 1, padding=3)
         )
-        resblock = ResBlock1 if resblock == 0 else ResBlock2
+        resblock = ResBlock1 if cfg.resblock == 0 else ResBlock2
 
         self.ups = nn.ModuleList()
         for i, (u, k) in enumerate(zip(cfg.upsample_rates, cfg.upsample_kernel_sizes)):
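
Note: iSTFTNetConfig mirrors the HifiganConfig change and additionally exposes gen_istft_n_fft and sampling_rate as explicit keyword arguments; iSTFTNetGenerator picks resblock from the config the same way. A config-only sketch with illustrative values:

    from lt_tensor.model_zoo.audio_models.istft import iSTFTNetConfig

    cfg = iSTFTNetConfig(
        upsample_rates=[8, 8],
        upsample_kernel_sizes=[16, 16],
        gen_istft_n_fft=16,
        sampling_rate=24000,
    )
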
--- lt_tensor-0.0.1a18.dist-info/METADATA
+++ lt_tensor-0.0.1a19.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lt-tensor
-Version: 0.0.1a18
+Version: 0.0.1a19
 Summary: General utilities for PyTorch and others. Built for general use.
 Home-page: https://github.com/gr1336/lt-tensor/
 Author: gr1336
--- lt_tensor-0.0.1a18.dist-info/RECORD
+++ lt_tensor-0.0.1a19.dist-info/RECORD
@@ -1,5 +1,5 @@
 lt_tensor/__init__.py,sha256=XxNCGcVL-haJyMpifr-GRaamo32R6jmqe3iOuS4ecfs,469
-lt_tensor/config_templates.py,sha256=xWZhktYVlkwvJVreqyACpWo-lJ5htG9vTZyqZ6OexzA,3899
+lt_tensor/config_templates.py,sha256=9hLt7OLq3z1y8FKNoGY_sIJHHnVoXsLcuI4x2zoE0Q4,3634
 lt_tensor/losses.py,sha256=zvkCOnE5XpF3v6ymivRIdqPTsMM5zc94ZMom7YDi3zM,4946
 lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
 lt_tensor/math_ops.py,sha256=TkD4WQG42KsQ9Fg7FXOjf8f-ixtW0apf2XjaooecVx4,2257
@@ -19,13 +19,13 @@ lt_tensor/model_zoo/pos_encoder.py,sha256=3d1EYLinCU9UAy-WuEWeYMGhMqaGknCiQ5qEmh
 lt_tensor/model_zoo/residual.py,sha256=i5V4ju7DB3WesKBVm6KH_LyPoKGDUOyo2Usfs-PyP58,9394
 lt_tensor/model_zoo/transformer.py,sha256=HUFoFFh7EQJErxdd9XIxhssdjvNVx2tNGDJOTUfwG2A,4301
 lt_tensor/model_zoo/audio_models/__init__.py,sha256=MoG9YjxLyvscq_6njK1ljGBletK9iedBXt66bplzW-s,83
-lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=R14hY-nCbCO-T3ox9f4MXCPgQQogFUKAJ2WtntLz09w,7393
-lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=6ZGYyNiTMGHnOjGU0gq_TSM8Y9LtYlP3neGwa01Ghyk,13135
-lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=noi4GLGZQ_qg5H-ipe5d7j8rvt4Hic_sXiME-TE-B2c,13783
+lt_tensor/model_zoo/audio_models/diffwave/__init__.py,sha256=vSrQJ0NXYvTbjOyjLjiMNy95Ib7VO1BJ5UqhoQ7dzYo,8032
+lt_tensor/model_zoo/audio_models/hifigan/__init__.py,sha256=JNebaYO3nsyyqpYCCOyL13zY2uxLY3NOCeNynF6-96k,13940
+lt_tensor/model_zoo/audio_models/istft/__init__.py,sha256=JdFChpPhURaI2qb9mDV6vzDcZN757FBGGtgzN3vxtJ0,14821
 lt_tensor/processors/__init__.py,sha256=4b9MxAJolXiJfSm20ZEspQTDm1tgLazwlPWA_jB1yLM,63
 lt_tensor/processors/audio.py,sha256=SMqNSl4Den-x1awTCQ8-TcR-0jPiv5lDaUpU93SRRaw,14749
-lt_tensor-0.0.1a18.dist-info/licenses/LICENSE,sha256=HUnu_iSPpnDfZS_PINhO3AoVizJD1A2vee8WX7D7uXo,11358
-lt_tensor-0.0.1a18.dist-info/METADATA,sha256=fgRzOiw5tMmkaEY9HrGEKNL2v9mN5JVbf9r-bf18Am0,1033
-lt_tensor-0.0.1a18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lt_tensor-0.0.1a18.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
-lt_tensor-0.0.1a18.dist-info/RECORD,,
+lt_tensor-0.0.1a19.dist-info/licenses/LICENSE,sha256=HUnu_iSPpnDfZS_PINhO3AoVizJD1A2vee8WX7D7uXo,11358
+lt_tensor-0.0.1a19.dist-info/METADATA,sha256=lkXND2y0Ue6-y_1LDUcpbPWEJ9jnUG71zJMfcSwKdJs,1033
+lt_tensor-0.0.1a19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lt_tensor-0.0.1a19.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+lt_tensor-0.0.1a19.dist-info/RECORD,,