deeplotx 0.8.5__tar.gz → 0.8.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {deeplotx-0.8.5 → deeplotx-0.8.7}/PKG-INFO +1 -1
  2. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/encoder/encoder.py +1 -1
  3. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/base_neural_network.py +6 -3
  4. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/feed_forward.py +1 -1
  5. deeplotx-0.8.7/deeplotx/nn/linear_regression.py +29 -0
  6. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/logistic_regression.py +2 -2
  7. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/recursive_sequential.py +12 -12
  8. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/roformer_encoder.py +3 -3
  9. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/softmax_regression.py +2 -2
  10. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/PKG-INFO +1 -1
  11. {deeplotx-0.8.5 → deeplotx-0.8.7}/pyproject.toml +1 -1
  12. deeplotx-0.8.5/deeplotx/nn/linear_regression.py +0 -26
  13. {deeplotx-0.8.5 → deeplotx-0.8.7}/LICENSE +0 -0
  14. {deeplotx-0.8.5 → deeplotx-0.8.7}/README.md +0 -0
  15. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/__init__.py +0 -0
  16. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/encoder/__init__.py +0 -0
  17. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/encoder/long_text_encoder.py +0 -0
  18. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/encoder/longformer_encoder.py +0 -0
  19. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/__init__.py +0 -0
  20. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/attention.py +0 -0
  21. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/auto_regression.py +0 -0
  22. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/long_context_auto_regression.py +0 -0
  23. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/long_context_recursive_sequential.py +0 -0
  24. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/multi_head_attention.py +0 -0
  25. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/multi_head_feed_forward.py +0 -0
  26. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/rope.py +0 -0
  27. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/similarity/__init__.py +0 -0
  28. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/similarity/distribution.py +0 -0
  29. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/similarity/set.py +0 -0
  30. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/similarity/vector.py +0 -0
  31. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/trainer/__init__.py +0 -0
  32. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/trainer/base_trainer.py +0 -0
  33. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/trainer/text_binary_classification_trainer.py +0 -0
  34. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/util/__init__.py +0 -0
  35. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/util/hash.py +0 -0
  36. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/util/read_file.py +0 -0
  37. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/SOURCES.txt +0 -0
  38. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/dependency_links.txt +0 -0
  39. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/requires.txt +0 -0
  40. {deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/top_level.txt +0 -0
  41. {deeplotx-0.8.5 → deeplotx-0.8.7}/setup.cfg +0 -0
{deeplotx-0.8.5 → deeplotx-0.8.7}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deeplotx
-Version: 0.8.5
+Version: 0.8.7
 Summary: Easy-2-use long text NLP toolkit.
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/encoder/encoder.py
@@ -48,7 +48,7 @@ class Encoder(nn.Module):
             return self.encoder.forward(_input_tup[0], attention_mask=_input_tup[1]).last_hidden_state[:, 0, :]
 
         num_chunks = math.ceil(input_ids.shape[-1] / self.embed_dim)
-        chunks = chunk_results = []
+        chunks, chunk_results = [], []
         for i in range(num_chunks):
            start_idx = i * self.embed_dim
            end_idx = min(start_idx + self.embed_dim, input_ids.shape[-1])
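Note: the 0.8.5 line `chunks = chunk_results = []` bound both names to the same list object, so appends intended for one list also showed up in the other; the 0.8.7 form creates two independent lists. A standalone sketch of the aliasing bug (illustrative values only, not deeplotx code):

    chunks = chunk_results = []      # both names refer to ONE shared list
    chunks.append('chunk-0')
    print(chunk_results)             # ['chunk-0'] -- unintended shared state

    chunks, chunk_results = [], []   # two distinct lists, as in 0.8.7
    chunks.append('chunk-0')
    print(chunk_results)             # [] -- independent, as intended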
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/base_neural_network.py
@@ -99,11 +99,14 @@ class BaseNeuralNetwork(nn.Module):
 
     def predict(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-        __train = self.training
-        self.training = False
+        training_state_dict = dict()
+        for m in self.modules():
+            training_state_dict[m] = m.training
+            m.training = False
         with torch.no_grad():
             res = self.forward(x)
-        self.training = __train
+        for m, training_state in training_state_dict.items():
+            m.training = training_state
         return res
 
     def save(self, model_name: str | None = None, model_dir: str = '.', _suffix: str = DEFAULT_SUFFIX):
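Note: the 0.8.5 `predict` flipped `self.training` only on the top-level module, so submodules (dropout layers, LSTMs, etc.) stayed in training mode during inference; 0.8.7 snapshots and restores the flag on every submodule. A minimal sketch of the same effect using PyTorch's built-in recursive mode switches (an alternative pattern, not the deeplotx implementation):

    import torch
    from torch import nn

    def predict(model: nn.Module, x: torch.Tensor) -> torch.Tensor:
        was_training = model.training
        model.eval()                   # recursively sets .training = False on all submodules
        try:
            with torch.no_grad():
                out = model(x)
        finally:
            model.train(was_training)  # restores one uniform mode, not per-module flags
        return out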
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/feed_forward.py
@@ -28,7 +28,7 @@ class FeedForwardUnit(BaseNeuralNetwork):
         x = self.layer_norm(x)
         x = self.up_proj(x)
         x = self.parametric_relu(x)
-        if self._dropout_rate > .0:
+        if self._dropout_rate > .0 and self.training:
             x = torch.dropout(x, p=self._dropout_rate, train=self.training)
         return self.down_proj(x) + residual
 
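Note: `torch.dropout(x, p, train=False)` is already an identity op, so now that `predict` propagates `training=False` to submodules, the added `and self.training` guard mainly skips the redundant call at inference. A quick standalone check of that behavior:

    import torch

    x = torch.ones(4)
    print(torch.dropout(x, p=0.5, train=False))  # tensor([1., 1., 1., 1.]) -- unchanged
    print(torch.dropout(x, p=0.5, train=True))   # survivors scaled by 1 / (1 - p) = 2.0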
deeplotx-0.8.7/deeplotx/nn/linear_regression.py (new file)
@@ -0,0 +1,29 @@
+from typing_extensions import override
+
+import torch
+from torch import nn
+
+from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
+
+
+class LinearRegression(BaseNeuralNetwork):
+    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
+                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
+        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
+        self.multi_head_ffn_layers = nn.ModuleList([MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
+                                                                         num_layers=kwargs.get('head_layers', 1),
+                                                                         expansion_factor=expansion_factor,
+                                                                         bias=bias, dropout_rate=dropout_rate,
+                                                                         device=self.device, dtype=self.dtype) for _ in range(num_layers)])
+        self.out_proj = nn.Linear(in_features=input_dim, out_features=output_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        residual = x
+        for ffn in self.multi_head_ffn_layers:
+            x = ffn(x)
+        return self.out_proj(x + residual)
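Note: compared with 0.8.5, the rewritten `LinearRegression` stacks `num_layers` MultiHeadFeedForward blocks (each `head_layers` deep, read from `**kwargs`) and adds the residual across the whole stack before the output projection: `out_proj(ffn_stack(x) + x)`. A hypothetical usage sketch, assuming the constructor signature shown above:

    import torch
    from deeplotx.nn.linear_regression import LinearRegression

    # Hypothetical dimensions; `head_layers` travels through **kwargs to each FFN block.
    model = LinearRegression(input_dim=768, output_dim=1,
                             num_heads=4, num_layers=2, head_layers=2)
    y = model.predict(torch.randn(8, 768))  # inference; training flags toggled off
    print(y.shape)                          # expected: torch.Size([8, 1])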
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/logistic_regression.py
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class LogisticRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int = 1, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/recursive_sequential.py
@@ -4,7 +4,7 @@ import torch
 from torch import nn
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
+from deeplotx.nn.linear_regression import LinearRegression
 
 
 class RecursiveSequential(BaseNeuralNetwork):
@@ -20,11 +20,10 @@ class RecursiveSequential(BaseNeuralNetwork):
                             num_layers=recursive_layers, batch_first=True,
                             bias=True, bidirectional=True, device=self.device,
                             dtype=self.dtype)
-        self.ffn = MultiHeadFeedForward(feature_dim=recursive_hidden_dim * 2, num_heads=kwargs.get('ffn_heads', 1),
-                                        num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
-                                        bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
-        self.__proj = nn.Linear(in_features=recursive_hidden_dim * 2, out_features=output_dim, bias=bias,
-                                device=self.device, dtype=self.dtype)
+        self.out_proj = LinearRegression(input_dim=recursive_hidden_dim * 2, output_dim=output_dim,
+                                         num_heads=kwargs.get('ffn_heads', 1), head_layers=kwargs.get('ffn_head_layers', 1),
+                                         num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
+                                         bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
 
     def initial_state(self, batch_size: int = 1) -> tuple[torch.Tensor, torch.Tensor]:
         zeros = torch.zeros(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size, device=self.device, dtype=self.dtype)
@@ -37,16 +36,17 @@ class RecursiveSequential(BaseNeuralNetwork):
                 self.ensure_device_and_dtype(state[1], device=self.device, dtype=self.dtype))
         x, (hidden_state, cell_state) = self.lstm(x, state)
         x = x[:, -1, :]
-        residual = x
-        x = self.ffn(x) + residual
-        x = self.__proj(x)
+        x = self.out_proj(x)
         return x, (hidden_state, cell_state)
 
     @override
     def predict(self, x: torch.Tensor) -> torch.Tensor:
-        __train = self.training
-        self.training = False
+        training_state_dict = dict()
+        for m in self.modules():
+            training_state_dict[m] = m.training
+            m.training = False
         with torch.no_grad():
             res = self.forward(x.unsqueeze(0), self.initial_state(batch_size=1))[0]
-        self.training = __train
+        for m, training_state in training_state_dict.items():
+            m.training = training_state
         return res
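Note: the separate `ffn` + name-mangled `__proj` head collapses into a single `LinearRegression` module (`out_proj`) that carries the residual connection internally, so the manual `residual` bookkeeping in `forward` disappears. With one FFN block the two heads compute the same expression; the new form additionally supports deeper stacks via `num_layers` / `head_layers`. Side by side (placeholder names, simplified):

    # 0.8.5 head: explicit residual around the FFN, then a bare linear projection
    def head_085(x):
        return proj(ffn(x) + x)

    # 0.8.7 head: LinearRegression computes the same shape internally:
    # out_proj(ffn_stack(x) + x)
    def head_087(x):
        return out_proj_linear(ffn_stack(x) + x)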
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/roformer_encoder.py
@@ -27,8 +27,8 @@ class RoFormerEncoder(BaseNeuralNetwork):
                          device=self.device, dtype=self.dtype)
         self.layer_norm = nn.LayerNorm(normalized_shape=feature_dim, eps=1e-9,
                                        device=self.device, dtype=self.dtype)
-        self.__proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
-                                bias=bias, device=self.device, dtype=self.dtype)
+        self.out_proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
@@ -37,4 +37,4 @@ class RoFormerEncoder(BaseNeuralNetwork):
             mask = self.ensure_device_and_dtype(mask, device=self.device, dtype=self.dtype)
         attn = self.attn(x=self.layer_norm(x), y=None, mask=mask)
         x = torch.concat([attn, x], dim=-1)
-        return self.__proj(self.ffn(x))
+        return self.out_proj(self.ffn(x))
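Note: besides readability, the rename changes checkpoint keys: `self.__proj` inside `RoFormerEncoder` is name-mangled to `_RoFormerEncoder__proj.*` in the state dict and becomes `out_proj.*` in 0.8.7, so 0.8.5 weights will not load as-is. A hedged sketch of a key-remapping shim (hypothetical file name, assuming a plain `state_dict` checkpoint):

    import torch

    old_sd = torch.load('roformer_085.pt', map_location='cpu')
    new_sd = {k.replace('_RoFormerEncoder__proj.', 'out_proj.'): v
              for k, v in old_sd.items()}
    # model.load_state_dict(new_sd)  # with a 0.8.7 RoFormerEncoder instance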
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx/nn/softmax_regression.py
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class SoftmaxRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
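Note: `LogisticRegression` and `SoftmaxRegression` now forward `**kwargs` to `LinearRegression`, so head-specific options such as `head_layers` reach the FFN blocks from the classifier constructors as well. A hypothetical example:

    from deeplotx.nn.logistic_regression import LogisticRegression

    # `head_layers` is consumed by LinearRegression.__init__ via **kwargs.
    clf = LogisticRegression(input_dim=768, output_dim=1,
                             num_heads=2, num_layers=1, head_layers=2)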
{deeplotx-0.8.5 → deeplotx-0.8.7}/deeplotx.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deeplotx
-Version: 0.8.5
+Version: 0.8.7
 Summary: Easy-2-use long text NLP toolkit.
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
{deeplotx-0.8.5 → deeplotx-0.8.7}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "deeplotx"
-version = "0.8.5"
+version = "0.8.7"
 description = "Easy-2-use long text NLP toolkit."
 readme = "README.md"
 requires-python = ">=3.10"
deeplotx-0.8.5/deeplotx/nn/linear_regression.py (deleted)
@@ -1,26 +0,0 @@
-from typing_extensions import override
-
-import torch
-from torch import nn
-
-from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
-
-
-class LinearRegression(BaseNeuralNetwork):
-    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
-                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
-        self.ffn = MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
-                                        num_layers=num_layers, expansion_factor=expansion_factor,
-                                        bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
-        self.proj = nn.Linear(in_features=input_dim, out_features=output_dim,
-                              bias=bias, device=self.device, dtype=self.dtype)
-
-    @override
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-        residual = x
-        x = self.ffn(x) + residual
-        return self.proj(x)