lt-tensor 0.0.1a0__py3-none-any.whl → 0.0.1a2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
lt_tensor/_torch_commons.py CHANGED
@@ -1,12 +1,10 @@
  import torch
- from torch import nn, optim
  import torch.nn.functional as F
  from torch.optim import Optimizer
- from torch.nn import Module, L1Loss, MSELoss
  from torch.nn.utils import remove_weight_norm
- from torch import Tensor, FloatTensor, device, LongTensor
  from torch.nn.utils.parametrizations import weight_norm, spectral_norm
+ from torch import nn, optim, Tensor, FloatTensor, LongTensor

  from lt_utils.common import *

- DeviceType: TypeAlias = Union[device, str]
+ DeviceType: TypeAlias = Union[torch.device, str]
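
The consolidation above drops the bare `device`, `Module`, `L1Loss`, and `MSELoss` names from this module's namespace, which is why the alias now has to spell out `torch.device` and why every hunk below switches `Module` to `nn.Module`. A minimal sketch of how the alias behaves after the change; the `to_device` helper is hypothetical, and `TypeAlias`/`Union` are assumed to arrive via `from lt_utils.common import *` (spelled out here for self-containment):

    import torch
    from torch import Tensor
    from typing import TypeAlias, Union

    DeviceType: TypeAlias = Union[torch.device, str]

    def to_device(x: Tensor, device: DeviceType) -> Tensor:
        # Tensor.to() accepts both torch.device objects and plain strings,
        # so either member of the Union works unmodified.
        return x.to(device)
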
lt_tensor/misc_utils.py CHANGED
@@ -83,12 +83,12 @@ def set_seed(seed: int):
      torch.xpu.manual_seed_all(seed)


- def count_parameters(model: Module) -> int:
+ def count_parameters(model: nn.Module) -> int:
      """Returns total number of trainable parameters."""
      return sum(p.numel() for p in model.parameters() if p.requires_grad)


- def freeze_all_except(model: Module, except_layers: Optional[list[str]] = None):
+ def freeze_all_except(model: nn.Module, except_layers: Optional[list[str]] = None):
      """Freezes all model parameters except specified layers."""
      no_exceptions = not except_layers
      for name, param in model.named_parameters():
@@ -98,14 +98,14 @@ def freeze_all_except(model: Module, except_layers: Optional[list[str]] = None):
          param.requires_grad_(False)


- def freeze_selected_weights(model: Module, target_layers: list[str]):
+ def freeze_selected_weights(model: nn.Module, target_layers: list[str]):
      """Freezes only parameters on specified layers."""
      for name, param in model.named_parameters():
          if any(layer in name for layer in target_layers):
              param.requires_grad_(False)


- def unfreeze_all_except(model: Module, except_layers: Optional[list[str]] = None):
+ def unfreeze_all_except(model: nn.Module, except_layers: Optional[list[str]] = None):
      """Unfreezes all model parameters except specified layers."""
      no_exceptions = not except_layers
      for name, param in model.named_parameters():
@@ -115,14 +115,14 @@ def unfreeze_all_except(model: Module, except_layers: Optional[list[str]] = None
          param.requires_grad_(True)


- def unfreeze_selected_weights(model: Module, target_layers: list[str]):
+ def unfreeze_selected_weights(model: nn.Module, target_layers: list[str]):
      """Unfreezes only parameters on specified layers."""
      for name, param in model.named_parameters():
          if not any(layer in name for layer in target_layers):
              param.requires_grad_(True)


- def clip_gradients(model: Module, max_norm: float = 1.0):
+ def clip_gradients(model: nn.Module, max_norm: float = 1.0):
      """Applies gradient clipping."""
      return nn.utils.clip_grad_norm_(model.parameters(), max_norm)
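
These helpers iterate over `model.named_parameters()`; judging by the substring match visible in `freeze_selected_weights`, the `except_layers` entries are matched against parameter names the same way. A usage sketch with a throwaway `nn.Sequential` (purely illustrative; parameters in a `Sequential` are named `"0.weight"`, `"2.bias"`, and so on):

    import torch
    from torch import nn
    from lt_tensor.misc_utils import freeze_all_except, clip_gradients

    # Throwaway model purely for illustration.
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))

    # Keep only the final Linear (registered as "2") trainable.
    freeze_all_except(model, except_layers=["2"])

    loss = model(torch.randn(8, 16)).sum()
    loss.backward()
    clip_gradients(model, max_norm=1.0)  # wraps nn.utils.clip_grad_norm_
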
 
@@ -576,7 +576,7 @@ def masked_cross_entropy(
      return loss


- class NoiseScheduler(Module):
+ class NoiseScheduler(nn.Module):
      def __init__(self, timesteps: int = 512):
          super().__init__()
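
Only the class declaration and the start of the constructor appear in this hunk, so all the diff establishes is that `NoiseScheduler` is an `nn.Module` built from a timestep count. That alone buys the usual module machinery; a minimal sketch:

    import torch
    from lt_tensor.misc_utils import NoiseScheduler

    scheduler = NoiseScheduler(timesteps=512)
    # Any buffers the scheduler registers travel with .to(), which is the
    # practical payoff of subclassing nn.Module.
    scheduler = scheduler.to("cuda" if torch.cuda.is_available() else "cpu")
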
 
lt_tensor/model_zoo/pos.py CHANGED
@@ -9,7 +9,7 @@ from .._torch_commons import *
  from .._basics import Model


- class RotaryEmbedding(Module):
+ class RotaryEmbedding(nn.Module):
      def __init__(self, dim: int, base: int = 10000):
          """
          Rotary Positional Embedding Module.
@@ -76,7 +76,7 @@ class RotaryEmbedding(Module):
          return x_rotated.view(b, s, d)  # Back to [b, s, d]


- class PositionalEncoding(Module):
+ class PositionalEncoding(nn.Module):
      def __init__(self, d_model: int, max_len: int = 8192):
          super().__init__()
          # create a matrix of [seq_len, hidden_dim] representing positional encoding for each token in sequence
@@ -100,7 +100,7 @@ class PositionalEncoding(Module):
          return x


- class LearnedPositionalEncoding(Module):
+ class LearnedPositionalEncoding(nn.Module):
      def __init__(self, max_len: int, dim_model: int, dropout: float = 0.1):
          super().__init__()
          self.embedding = nn.Embedding(max_len, dim_model)
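
All three positional-embedding classes in this file change only their base-class spelling, from the formerly re-exported `Module` to `nn.Module`. Construction works as the signatures above suggest; a small sketch pairing it with the `count_parameters` helper from `misc_utils` (the import path follows the RECORD listing below; the forward pass is not part of these hunks, so only construction is exercised):

    from lt_tensor.misc_utils import count_parameters
    from lt_tensor.model_zoo.pos import LearnedPositionalEncoding

    # A max_len x dim_model learnable embedding table, per the __init__ shown above.
    pe = LearnedPositionalEncoding(max_len=8192, dim_model=512, dropout=0.1)
    print(count_parameters(pe))  # 8192 * 512 = 4,194,304 trainable weights
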
lt_tensor/model_zoo/rsd.py CHANGED
@@ -11,7 +11,7 @@ import math
  from ..misc_utils import log_tensor


- def spectral_norm_select(module: Module, enabled: bool):
+ def spectral_norm_select(module: nn.Module, enabled: bool):
      if enabled:
          return spectral_norm(module)
      return module
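
`spectral_norm_select` is shown in full: it conditionally wraps a module in the `spectral_norm` parametrization imported via `_torch_commons`. Assuming this hunk belongs to `model_zoo/rsd.py`, as the RECORD entries below suggest, usage reduces to:

    import torch
    from torch import nn
    from lt_tensor.model_zoo.rsd import spectral_norm_select

    use_sn = True
    conv = spectral_norm_select(nn.Conv1d(64, 64, kernel_size=3, padding=1), enabled=use_sn)
    # enabled=True reparametrizes the weight toward unit spectral norm;
    # enabled=False hands the module back untouched.
    y = conv(torch.randn(2, 64, 100))
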
lt_tensor-0.0.1a0.dist-info/METADATA → lt_tensor-0.0.1a2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lt-tensor
- Version: 0.0.1a0
+ Version: 0.0.1a2
  Summary: General utilities for PyTorch and others. Built for general use.
  Home-page: https://github.com/gr1336/lt-tensor/
  Author: gr1336
@@ -17,7 +17,7 @@ Requires-Dist: numpy>=1.26.4
  Requires-Dist: tokenizers
  Requires-Dist: pyyaml>=6.0.0
  Requires-Dist: numba>0.60.0
- Requires-Dist: lt-utils==0.0.1a0
+ Requires-Dist: lt-utils
  Dynamic: author
  Dynamic: classifier
  Dynamic: description
lt_tensor-0.0.1a0.dist-info/RECORD → lt_tensor-0.0.1a2.dist-info/RECORD RENAMED
@@ -1,20 +1,20 @@
  lt_tensor/__init__.py,sha256=pUB05ZkgkpP10ivzwoWdbq_HCxw-iOsbf6m8eFtx-YM,26
  lt_tensor/_basics.py,sha256=Zty5XZ5qeVFoZJRhtpGvOH7rg9hbAS7mIULOdrOKBDQ,9189
- lt_tensor/_torch_commons.py,sha256=_2Eck-MsQ46PxW5ku7NJvNSL5vg54_4GkLCqdzFevwA,402
+ lt_tensor/_torch_commons.py,sha256=DM0pP5tTachpmbELWs-MkwuEob4qBLEG6t_2HFK-SW0,338
  lt_tensor/lr_schedulers.py,sha256=LSZzqrOOLzSthD8k-W4cYPJt0vCjmHkiJkLr5e3yRTE,3659
  lt_tensor/math_ops.py,sha256=j4Arst-kOdm0bcZbXD4rzcVdiyYOJ59ZQQIyH7r0Wug,2067
- lt_tensor/misc_utils.py,sha256=3r6ikrBCj2IjSWZMRU1Lif0OgYTF3HExANG_IqhPtic,19799
+ lt_tensor/misc_utils.py,sha256=ZUArnxbMUD7bZ6RNhnhble-E0ASv1iqbTcAiVlHPUkg,19820
  lt_tensor/monotonic_align.py,sha256=LhBd8p1xdBzg6jQrQX1j7b4PNeYGwIqM24zcU-pHOLE,2239
  lt_tensor/transform.py,sha256=IYPT2YHT9NDvHrdtJvTLmxL9Cm26Ck2Uc9zE0k6l2aI,9504
  lt_tensor/model_zoo/__init__.py,sha256=ybyd3St8wiswnBGKFcy6FqRo5NlfGPJPC7jbRJlTlv8,205
  lt_tensor/model_zoo/bsc.py,sha256=6jBICcy8FT81EUiN9g1eZuHhPF4xA7gzS5kaVT3RngU,6305
  lt_tensor/model_zoo/dfs.py,sha256=0dTA1aveZT5OZu8eI6Cb8q8IGSjZyFYDcfc2FpDH5S8,5980
  lt_tensor/model_zoo/fsn.py,sha256=YDu1sbLwJwSKCPlmPlqQujivlgfNvwpwGa5q4SY9MYk,2108
- lt_tensor/model_zoo/pos.py,sha256=L2j6zYkdBWjrgROJt4cFOwdnne6j94m2lGi9m_QC7oc,4460
- lt_tensor/model_zoo/rsd.py,sha256=QGfkhoP7BVCGlCyBkIxHE7eWUp71JFkK6bM4dgBw1Hw,4720
+ lt_tensor/model_zoo/pos.py,sha256=h4ukjUEl_E0HirmGupAJh_nL7QlJfVtPYyKpZQuAC1M,4469
+ lt_tensor/model_zoo/rsd.py,sha256=mqVcte15BUzh6hR6GEYUA-GdpFYpvBvBs7Zfi2ZCBTM,4723
  lt_tensor/model_zoo/tfr.py,sha256=mIwu6WqDxcLGlBfofIIspzGpUe2jsR0hrzT9mEW-MHE,4208
- lt_tensor-0.0.1a0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- lt_tensor-0.0.1a0.dist-info/METADATA,sha256=hQVkxd4J5C7KX1DRVVYkIVKK0MIlGf-0kSLQ--HkTdY,936
- lt_tensor-0.0.1a0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lt_tensor-0.0.1a0.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
- lt_tensor-0.0.1a0.dist-info/RECORD,,
+ lt_tensor-0.0.1a2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ lt_tensor-0.0.1a2.dist-info/METADATA,sha256=EfecXQ2VLO4pFyZLvYbhWoZQbLM-ndb9ue4GYpZQnD0,927
+ lt_tensor-0.0.1a2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lt_tensor-0.0.1a2.dist-info/top_level.txt,sha256=35FuhFeXnUyvHWdbVHGPh0hS8euofafnJ_GJAVSF4Kk,10
+ lt_tensor-0.0.1a2.dist-info/RECORD,,