deeplotx 0.8.5__py3-none-any.whl → 0.8.7__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- deeplotx/encoder/encoder.py +1 -1
- deeplotx/nn/base_neural_network.py +6 -3
- deeplotx/nn/feed_forward.py +1 -1
- deeplotx/nn/linear_regression.py +11 -8
- deeplotx/nn/logistic_regression.py +2 -2
- deeplotx/nn/recursive_sequential.py +12 -12
- deeplotx/nn/roformer_encoder.py +3 -3
- deeplotx/nn/softmax_regression.py +2 -2
- {deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/METADATA +1 -1
- {deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/RECORD +13 -13
- {deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/WHEEL +0 -0
- {deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/licenses/LICENSE +0 -0
- {deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/top_level.txt +0 -0
deeplotx/encoder/encoder.py
CHANGED
```diff
@@ -48,7 +48,7 @@ class Encoder(nn.Module):
             return self.encoder.forward(_input_tup[0], attention_mask=_input_tup[1]).last_hidden_state[:, 0, :]
 
         num_chunks = math.ceil(input_ids.shape[-1] / self.embed_dim)
-        chunks
+        chunks, chunk_results = [], []
         for i in range(num_chunks):
             start_idx = i * self.embed_dim
             end_idx = min(start_idx + self.embed_dim, input_ids.shape[-1])
```
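The loop around this change splits `input_ids` into `embed_dim`-sized windows, clamping the final window to the sequence length. A standalone illustration of that boundary arithmetic (not deeplotx code):

```python
import math

def chunk_bounds(total_len: int, chunk_size: int) -> list[tuple[int, int]]:
    # Mirrors the loop in Encoder: ceil-divide, then clamp the last chunk.
    num_chunks = math.ceil(total_len / chunk_size)
    return [(i * chunk_size, min((i + 1) * chunk_size, total_len))
            for i in range(num_chunks)]

print(chunk_bounds(10, 4))  # [(0, 4), (4, 8), (8, 10)] — last chunk is clamped
```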
deeplotx/nn/base_neural_network.py
CHANGED
```diff
@@ -99,11 +99,14 @@ class BaseNeuralNetwork(nn.Module):
 
     def predict(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-
-        self.
+        training_state_dict = dict()
+        for m in self.modules():
+            training_state_dict[m] = m.training
+            m.training = False
         with torch.no_grad():
             res = self.forward(x)
-
+        for m, training_state in training_state_dict.items():
+            m.training = training_state
         return res
 
     def save(self, model_name: str | None = None, model_dir: str = '.', _suffix: str = DEFAULT_SUFFIX):
```
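The rewritten `predict` snapshots each submodule's `training` flag, forces inference behavior, and restores the flags afterwards. A minimal self-contained sketch of the same pattern (`TinyNet` is a hypothetical stand-in, not part of deeplotx):

```python
import torch
from torch import nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(nn.Linear(4, 8), nn.Dropout(0.5), nn.Linear(8, 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.body(x)

    def predict(self, x: torch.Tensor) -> torch.Tensor:
        # Snapshot every submodule's training flag, then force eval behavior.
        training_state_dict = {m: m.training for m in self.modules()}
        for m in self.modules():
            m.training = False
        with torch.no_grad():
            res = self.forward(x)
        # Restore each flag exactly as it was, even if modes were mixed.
        for m, training_state in training_state_dict.items():
            m.training = training_state
        return res

net = TinyNet().train()
out = net.predict(torch.randn(2, 4))  # dropout is inactive during predict
assert net.training                   # original mode restored afterwards
```

Compared with a blanket `self.eval()` / `self.train()` round trip, this restores per-module state, so submodules deliberately left in eval mode stay that way.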
deeplotx/nn/feed_forward.py
CHANGED
```diff
@@ -28,7 +28,7 @@ class FeedForwardUnit(BaseNeuralNetwork):
         x = self.layer_norm(x)
         x = self.up_proj(x)
         x = self.parametric_relu(x)
-        if self._dropout_rate > .0:
+        if self._dropout_rate > .0 and self.training:
             x = torch.dropout(x, p=self._dropout_rate, train=self.training)
         return self.down_proj(x) + residual
 
```
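`torch.dropout` already acts as the identity when `train=False`, so the added `self.training` guard simply skips the call during inference. A quick standalone check of both behaviors:

```python
import torch

x = torch.randn(3, 5)
# In eval mode torch.dropout returns the input unchanged...
assert torch.equal(torch.dropout(x, p=0.1, train=False), x)
# ...while in train mode it zeroes elements and rescales survivors by 1/(1-p).
y = torch.dropout(x, p=0.5, train=True)
mask = y != 0
assert torch.allclose(y[mask], x[mask] * 2.0)
```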
deeplotx/nn/linear_regression.py
CHANGED
```diff
@@ -10,17 +10,20 @@ from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward
 class LinearRegression(BaseNeuralNetwork):
     def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
-        self.
-
-
-
-
+        self.multi_head_ffn_layers = nn.ModuleList([MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
+                                                                         num_layers=kwargs.get('head_layers', 1),
+                                                                         expansion_factor=expansion_factor,
+                                                                         bias=bias, dropout_rate=dropout_rate,
+                                                                         device=self.device, dtype=self.dtype) for _ in range(num_layers)])
+        self.out_proj = nn.Linear(in_features=input_dim, out_features=output_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
         residual = x
-
-
+        for ffn in self.multi_head_ffn_layers:
+            x = ffn(x)
+        return self.out_proj(x + residual)
```
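As the hunk shows, `num_layers` now counts stacked `MultiHeadFeedForward` blocks, while the new `head_layers` keyword (read from `**kwargs`) sets the depth inside each block. A usage sketch, assuming the constructor behaves exactly as reconstructed above:

```python
import torch
from deeplotx.nn.linear_regression import LinearRegression

model = LinearRegression(input_dim=256, output_dim=1,
                         num_heads=4,      # heads per feed-forward block
                         num_layers=2,     # stacked MultiHeadFeedForward blocks
                         head_layers=3,    # depth inside each block (new in 0.8.7)
                         expansion_factor=2, dropout_rate=0.1)
x = torch.randn(8, 256)
y = model.predict(x)  # (8, 1); forward() adds a global residual before out_proj
```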
deeplotx/nn/logistic_regression.py
CHANGED
```diff
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class LogisticRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int = 1, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
```
deeplotx/nn/recursive_sequential.py
CHANGED
```diff
@@ -4,7 +4,7 @@ import torch
 from torch import nn
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.
+from deeplotx.nn.linear_regression import LinearRegression
 
 
 class RecursiveSequential(BaseNeuralNetwork):
```
```diff
@@ -20,11 +20,10 @@ class RecursiveSequential(BaseNeuralNetwork):
                             num_layers=recursive_layers, batch_first=True,
                             bias=True, bidirectional=True, device=self.device,
                             dtype=self.dtype)
-        self.
-
-
-
-                            device=self.device, dtype=self.dtype)
+        self.out_proj = LinearRegression(input_dim=recursive_hidden_dim * 2, output_dim=output_dim,
+                                         num_heads=kwargs.get('ffn_heads', 1), head_layers=kwargs.get('ffn_head_layers', 1),
+                                         num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
+                                         bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
 
     def initial_state(self, batch_size: int = 1) -> tuple[torch.Tensor, torch.Tensor]:
         zeros = torch.zeros(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size, device=self.device, dtype=self.dtype)
```
```diff
@@ -37,16 +36,17 @@ class RecursiveSequential(BaseNeuralNetwork):
                                                  self.ensure_device_and_dtype(state[1], device=self.device, dtype=self.dtype))
         x, (hidden_state, cell_state) = self.lstm(x, state)
         x = x[:, -1, :]
-
-        x = self.ffn(x) + residual
-        x = self.__proj(x)
+        x = self.out_proj(x)
         return x, (hidden_state, cell_state)
 
     @override
     def predict(self, x: torch.Tensor) -> torch.Tensor:
-
-        self.
+        training_state_dict = dict()
+        for m in self.modules():
+            training_state_dict[m] = m.training
+            m.training = False
         with torch.no_grad():
             res = self.forward(x.unsqueeze(0), self.initial_state(batch_size=1))[0]
-
+        for m, training_state in training_state_dict.items():
+            m.training = training_state
         return res
```
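The overridden `predict` accepts one unbatched sequence: it unsqueezes to batch size 1 and seeds a zero LSTM state itself, with the same training-flag save/restore as the base class. A hedged usage sketch; the constructor arguments below are inferred from these hunks and may not match the full signature:

```python
import torch
from deeplotx.nn.recursive_sequential import RecursiveSequential

# Argument names are inferred from the diff above, not from the full source.
model = RecursiveSequential(input_dim=128, output_dim=2,
                            ffn_heads=2, ffn_head_layers=1)  # forwarded via **kwargs
seq = torch.randn(50, 128)   # one unbatched sequence: (seq_len, feature_dim)
pred = model.predict(seq)    # predict() unsqueezes to batch size 1 and
                             # seeds self.initial_state(batch_size=1)
```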
deeplotx/nn/roformer_encoder.py
CHANGED
```diff
@@ -27,8 +27,8 @@ class RoFormerEncoder(BaseNeuralNetwork):
                                           device=self.device, dtype=self.dtype)
         self.layer_norm = nn.LayerNorm(normalized_shape=feature_dim, eps=1e-9,
                                        device=self.device, dtype=self.dtype)
-        self.
-
+        self.out_proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
```
```diff
@@ -37,4 +37,4 @@ class RoFormerEncoder(BaseNeuralNetwork):
             mask = self.ensure_device_and_dtype(mask, device=self.device, dtype=self.dtype)
         attn = self.attn(x=self.layer_norm(x), y=None, mask=mask)
         x = torch.concat([attn, x], dim=-1)
-        return self.
+        return self.out_proj(self.ffn(x))
```
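The forward pass concatenates the attention output with the residual stream, doubling the channel width, which the new `out_proj` maps back down to `feature_dim`. A standalone shape sketch, assuming `self.ffn` preserves its input width:

```python
import torch
from torch import nn

d = 64                                      # feature_dim
x = torch.randn(2, 10, d)                   # (batch, seq, feature_dim)
attn_out = torch.randn(2, 10, d)            # stand-in for self.attn(...)
cat = torch.concat([attn_out, x], dim=-1)   # (2, 10, 2 * d)
out_proj = nn.Linear(2 * d, d)              # mirrors the new out_proj
y = out_proj(cat)                           # ffn assumed to keep width 2 * d
assert y.shape == (2, 10, d)
```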
deeplotx/nn/softmax_regression.py
CHANGED
```diff
@@ -8,10 +8,10 @@ from deeplotx.nn.linear_regression import LinearRegression
 class SoftmaxRegression(LinearRegression):
     def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
```
{deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/RECORD
CHANGED
```diff
@@ -1,23 +1,23 @@
 deeplotx/__init__.py,sha256=xEq8WQ2LpEZoLX_Z464d0dy4aemFGrEV6ZMJr6ioFnQ,1186
 deeplotx/encoder/__init__.py,sha256=BrsF5_4O-4pfihYF2wjExDOoAY-03kGJTH-Mhez4tsE,129
-deeplotx/encoder/encoder.py,sha256=
+deeplotx/encoder/encoder.py,sha256=tksTtmz9JRDSimCdhMkxpbGUHNWhARGaeKh2pBvLgEI,3988
 deeplotx/encoder/long_text_encoder.py,sha256=3ScdKDi65J5tdO8PFCXBjCzNUCLlJRwVhpDR0BrphG4,3951
 deeplotx/encoder/longformer_encoder.py,sha256=NNYLr5I9tdeh0C8Ir7QcbEMU9gDk6U7CiF3Tbg6NEsE,3372
 deeplotx/nn/__init__.py,sha256=YILwbxb-NHdiJjfOwBKH8F7PuZSDZSrGpTznPDucTro,710
 deeplotx/nn/attention.py,sha256=R-i-Rd7gnsh6hwXDeYfqLQOJvfSZIGfQbFzRlC91XLo,2879
 deeplotx/nn/auto_regression.py,sha256=j_R7WGPq9REngjpLuX5c0AaNqOpgGm2Vfrolw-XjWXw,877
-deeplotx/nn/base_neural_network.py,sha256=
-deeplotx/nn/feed_forward.py,sha256=
-deeplotx/nn/linear_regression.py,sha256=
-deeplotx/nn/logistic_regression.py,sha256=
+deeplotx/nn/base_neural_network.py,sha256=QCyB1dxOs4I8vpu6PCshrZs0infoHXS9IErw6tN-dhs,6060
+deeplotx/nn/feed_forward.py,sha256=kGWEUo8J7jrhSSWlitNnj-AcitNiLz6eOCvUcEuWlVs,2949
+deeplotx/nn/linear_regression.py,sha256=LWrrdAIw32KIT1bdr7q6HczdpEiCgb-R8BCNXGywMxE,1763
+deeplotx/nn/logistic_regression.py,sha256=nipWD3ZPRub2Cx0rU2zxYQyG0COn3NJvew8b2gbJy24,998
 deeplotx/nn/long_context_auto_regression.py,sha256=uy0k_g8wEfMH5nd5HCfrHA8dgEsuWBA2x8U-g3h4vQc,1054
 deeplotx/nn/long_context_recursive_sequential.py,sha256=pcZfnrIHBqbp2BssfUTS1klpuykZwowikfAIaOnvRUI,2674
 deeplotx/nn/multi_head_attention.py,sha256=3z73uGbvy3jszRy1B9nxGOJjlttHpcpRF8Qd09OEams,2267
 deeplotx/nn/multi_head_feed_forward.py,sha256=hD9ScrVJZ9kNksoFASf0xaPgEnNgCeRivW-XjYOPjj8,1908
-deeplotx/nn/recursive_sequential.py,sha256=
-deeplotx/nn/roformer_encoder.py,sha256=
+deeplotx/nn/recursive_sequential.py,sha256=sNvAs9iVCuWIgx0_6TizDq41hJpFbfKT3kyDHE86wRM,2928
+deeplotx/nn/roformer_encoder.py,sha256=BAPAMS5-qiM3i2FUyIW-ZTc7og4gZzwlu5LniqzaymY,2432
 deeplotx/nn/rope.py,sha256=RTOjnllubktdy2rzFWxBfkuLuGjhEMyDd06uojdqPhM,1848
-deeplotx/nn/softmax_regression.py,sha256=
+deeplotx/nn/softmax_regression.py,sha256=xe2etxSfN0e9XZ4E6Uyz5ThWWzAdQVjYIvN24j8kfNY,1019
 deeplotx/similarity/__init__.py,sha256=s3u-KSgxjnMcWpIItKgXNltFMPQ7YY3CqsqHI-5F1c8,724
 deeplotx/similarity/distribution.py,sha256=wQGouuuW531pZeBRKBujXsdsoz4fDnPw7_GW81jwepc,1066
 deeplotx/similarity/set.py,sha256=zhGFxtSIXlWqvipBYzoiPahp4g0boAIoUiMfG0wl07A,686
```
```diff
@@ -28,8 +28,8 @@ deeplotx/trainer/text_binary_classification_trainer.py,sha256=TFxOX8rWU_zKliI9zm
 deeplotx/util/__init__.py,sha256=5CH4MTeSgsmCe3LPMfvKoSBpwh6jDSBuHVElJvzQzgs,90
 deeplotx/util/hash.py,sha256=qbNU3RLBWGQYFVte9WZBAkZ1BkdjCXiKLDaKPN54KFk,662
 deeplotx/util/read_file.py,sha256=ptzouvEQeeW8KU5BrWNJlXw-vFXVrpS9SkAUxsu6A8A,612
-deeplotx-0.8.
-deeplotx-0.8.
-deeplotx-0.8.
-deeplotx-0.8.
-deeplotx-0.8.
+deeplotx-0.8.7.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
+deeplotx-0.8.7.dist-info/METADATA,sha256=fGyVnmSy3YKst_ZpwtMQhCq_-yxp5pvf-4zcQlhxNBA,13138
+deeplotx-0.8.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+deeplotx-0.8.7.dist-info/top_level.txt,sha256=hKg4pVDXZ-WWxkRfJFczRIll1Sv7VyfKCmzHLXbuh1U,9
+deeplotx-0.8.7.dist-info/RECORD,,
```
{deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/WHEEL
File without changes
{deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/licenses/LICENSE
File without changes
{deeplotx-0.8.5.dist-info → deeplotx-0.8.7.dist-info}/top_level.txt
File without changes