deeplotx 0.8.5__tar.gz → 0.8.6__tar.gz
This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- {deeplotx-0.8.5 → deeplotx-0.8.6}/PKG-INFO +1 -1
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/encoder/encoder.py +1 -1
- deeplotx-0.8.6/deeplotx/nn/linear_regression.py +29 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/logistic_regression.py +2 -2
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/recursive_sequential.py +6 -9
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/roformer_encoder.py +3 -3
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/softmax_regression.py +2 -2
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx.egg-info/PKG-INFO +1 -1
- {deeplotx-0.8.5 → deeplotx-0.8.6}/pyproject.toml +1 -1
- deeplotx-0.8.5/deeplotx/nn/linear_regression.py +0 -26
- {deeplotx-0.8.5 → deeplotx-0.8.6}/LICENSE +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/README.md +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/encoder/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/encoder/long_text_encoder.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/encoder/longformer_encoder.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/attention.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/auto_regression.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/base_neural_network.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/feed_forward.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/long_context_auto_regression.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/long_context_recursive_sequential.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/multi_head_attention.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/multi_head_feed_forward.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/rope.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/similarity/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/similarity/distribution.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/similarity/set.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/similarity/vector.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/trainer/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/trainer/base_trainer.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/trainer/text_binary_classification_trainer.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/util/__init__.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/util/hash.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/util/read_file.py +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx.egg-info/SOURCES.txt +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx.egg-info/dependency_links.txt +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx.egg-info/requires.txt +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx.egg-info/top_level.txt +0 -0
- {deeplotx-0.8.5 → deeplotx-0.8.6}/setup.cfg +0 -0
(Several removed lines in the hunks below are truncated in the source view; they are reproduced as-is.)

{deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/encoder/encoder.py
@@ -48,7 +48,7 @@ class Encoder(nn.Module):
             return self.encoder.forward(_input_tup[0], attention_mask=_input_tup[1]).last_hidden_state[:, 0, :]
 
         num_chunks = math.ceil(input_ids.shape[-1] / self.embed_dim)
-        chunks
+        chunks, chunk_results = [], []
         for i in range(num_chunks):
            start_idx = i * self.embed_dim
            end_idx = min(start_idx + self.embed_dim, input_ids.shape[-1])
deeplotx-0.8.6/deeplotx/nn/linear_regression.py (new file)
@@ -0,0 +1,29 @@
+from typing_extensions import override
+
+import torch
+from torch import nn
+
+from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
+
+
+class LinearRegression(BaseNeuralNetwork):
+    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
+                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
+        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
+        self.multi_head_ffn_layers = nn.ModuleList([MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
+                                                                         num_layers=kwargs.get('head_layers', 1),
+                                                                         expansion_factor=expansion_factor,
+                                                                         bias=bias, dropout_rate=dropout_rate,
+                                                                         device=self.device, dtype=self.dtype) for _ in range(num_layers)])
+        self.out_proj = nn.Linear(in_features=input_dim, out_features=output_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        residual = x
+        for ffn in self.multi_head_ffn_layers:
+            x = ffn(x)
+        return self.out_proj(x + residual)
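A hedged usage sketch for the new module, assuming deeplotx 0.8.6 is installed and that BaseNeuralNetwork resolves device and dtype when both are left as None (behavior not shown in this diff); all sizes are hypothetical. Note how head_layers reaches MultiHeadFeedForward through the new **kwargs:

import torch
from deeplotx.nn.linear_regression import LinearRegression

# Two stacked MultiHeadFeedForward blocks (num_layers=2), each with 2 heads of
# 3 inner layers (head_layers is read from **kwargs), a single residual add
# around the whole stack, then a Linear projection from 768 down to 1.
model = LinearRegression(input_dim=768, output_dim=1, num_heads=2,
                         num_layers=2, head_layers=3)
x = torch.randn(4, 768, device=model.device, dtype=model.dtype)
y = model(x)
print(y.shape)  # torch.Size([4, 1])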
{deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/logistic_regression.py
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class LogisticRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int = 1, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
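The only change here is threading **kwargs through to the LinearRegression base, so extras such as head_layers can now be set from the subclass. A short sketch with hypothetical sizes, under the same 0.8.6 assumptions as above:

import torch
from deeplotx.nn.logistic_regression import LogisticRegression

# head_layers previously had nowhere to go; with **kwargs it reaches
# MultiHeadFeedForward inside the LinearRegression base class.
clf = LogisticRegression(input_dim=768, num_heads=2, head_layers=2)
out = clf(torch.randn(4, 768, device=clf.device, dtype=clf.dtype))
print(out.shape)  # torch.Size([4, 1]) -- output_dim defaults to 1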
{deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/recursive_sequential.py
@@ -4,7 +4,7 @@ import torch
 from torch import nn
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.
+from deeplotx.nn.linear_regression import LinearRegression
 
 
 class RecursiveSequential(BaseNeuralNetwork):
@@ -20,11 +20,10 @@ class RecursiveSequential(BaseNeuralNetwork):
                             num_layers=recursive_layers, batch_first=True,
                             bias=True, bidirectional=True, device=self.device,
                             dtype=self.dtype)
-        self.
-
-
-
-                            device=self.device, dtype=self.dtype)
+        self.out_proj = LinearRegression(input_dim=recursive_hidden_dim * 2, output_dim=output_dim,
+                                         num_heads=kwargs.get('ffn_heads', 1), head_layers=kwargs.get('ffn_head_layers', 1),
+                                         num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
+                                         bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
 
     def initial_state(self, batch_size: int = 1) -> tuple[torch.Tensor, torch.Tensor]:
         zeros = torch.zeros(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size, device=self.device, dtype=self.dtype)
@@ -37,9 +36,7 @@ class RecursiveSequential(BaseNeuralNetwork):
                                  self.ensure_device_and_dtype(state[1], device=self.device, dtype=self.dtype))
         x, (hidden_state, cell_state) = self.lstm(x, state)
         x = x[:, -1, :]
-
-        x = self.ffn(x) + residual
-        x = self.__proj(x)
+        x = self.out_proj(x)
         return x, (hidden_state, cell_state)
 
     @override
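The old inline ffn/__proj pair is replaced by a single LinearRegression head named out_proj. Because the LSTM is bidirectional, it emits 2 * recursive_hidden_dim features per step, which is exactly the input_dim the head is built with. A minimal sketch of that dimension bookkeeping, using hypothetical sizes and plain torch plus the new module (RecursiveSequential's other machinery is omitted):

import torch
from torch import nn
from deeplotx.nn.linear_regression import LinearRegression

H = 256                                                  # hypothetical recursive_hidden_dim
lstm = nn.LSTM(input_size=768, hidden_size=H, num_layers=2,
               batch_first=True, bias=True, bidirectional=True)
head = LinearRegression(input_dim=H * 2, output_dim=3,   # bidirectional => 2 * H features
                        num_heads=1, num_layers=1, head_layers=1)
x = torch.randn(4, 10, 768)                              # (batch, seq, features)
out, (h, c) = lstm(x)                                    # out: (4, 10, 2 * H)
logits = head(out[:, -1, :])                             # last step -> (4, 3), as in forward() above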
{deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/roformer_encoder.py
@@ -27,8 +27,8 @@ class RoFormerEncoder(BaseNeuralNetwork):
                              device=self.device, dtype=self.dtype)
         self.layer_norm = nn.LayerNorm(normalized_shape=feature_dim, eps=1e-9,
                                        device=self.device, dtype=self.dtype)
-        self.
-
+        self.out_proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
@@ -37,4 +37,4 @@ class RoFormerEncoder(BaseNeuralNetwork):
             mask = self.ensure_device_and_dtype(mask, device=self.device, dtype=self.dtype)
         attn = self.attn(x=self.layer_norm(x), y=None, mask=mask)
         x = torch.concat([attn, x], dim=-1)
-        return self.
+        return self.out_proj(self.ffn(x))
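Shape bookkeeping for the restored return path: attn and x are both (..., feature_dim), torch.concat doubles the last dimension, self.ffn (built elsewhere in this class and presumably dimension-preserving on feature_dim * 2) keeps it, and the new out_proj maps it back to feature_dim. A plain-torch sketch of just the concat and projection, with a hypothetical feature_dim:

import torch
from torch import nn

feature_dim = 512                                  # hypothetical
out_proj = nn.Linear(feature_dim * 2, feature_dim)
attn = torch.randn(4, 16, feature_dim)             # stand-in for self.attn(...)
x = torch.randn(4, 16, feature_dim)
fused = torch.concat([attn, x], dim=-1)            # (4, 16, 1024)
y = out_proj(fused)                                # (4, 16, 512), back to feature_dim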
{deeplotx-0.8.5 → deeplotx-0.8.6}/deeplotx/nn/softmax_regression.py
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class SoftmaxRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
deeplotx-0.8.5/deeplotx/nn/linear_regression.py (deleted)
@@ -1,26 +0,0 @@
-from typing_extensions import override
-
-import torch
-from torch import nn
-
-from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
-
-
-class LinearRegression(BaseNeuralNetwork):
-    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
-                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
-        self.ffn = MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
-                                        num_layers=num_layers, expansion_factor=expansion_factor,
-                                        bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
-        self.proj = nn.Linear(in_features=input_dim, out_features=output_dim,
-                              bias=bias, device=self.device, dtype=self.dtype)
-
-    @override
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-        residual = x
-        x = self.ffn(x) + residual
-        return self.proj(x)
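Net effect of the rewrite: the 0.8.5 LinearRegression applied a single MultiHeadFeedForward (self.ffn) with one residual add and projected through self.proj, interpreting num_layers as the depth of that one block. The 0.8.6 version instead stacks num_layers separate MultiHeadFeedForward blocks in an nn.ModuleList, moves the per-block depth to the new head_layers kwarg, applies the residual once around the whole stack, and renames the projection to out_proj, matching the out_proj names introduced in RecursiveSequential and RoFormerEncoder above.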