deeplotx 0.5.5__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deeplotx/__init__.py CHANGED
@@ -5,6 +5,8 @@ __ROOT__ = os.path.dirname(os.path.abspath(__file__))
 
 from .encoder import Encoder, LongTextEncoder, LongformerEncoder
 from .nn import (
+    BaseNeuralNetwork,
+    FeedForward,
     LinearRegression,
     LogisticRegression,
     SoftmaxRegression,
@@ -1,6 +1,7 @@
 import logging
 import os
 import math
+from requests.exceptions import ConnectTimeout, SSLError
 
 import torch
 from torch import nn
@@ -18,14 +19,31 @@ class Encoder(nn.Module):
         super().__init__()
         self.device = torch.device(device) if device is not None \
             else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
-                                                       cache_dir=CACHE_PATH, _from_auto=True)
-        self.encoder = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
-                                                 cache_dir=CACHE_PATH, _from_auto=True).to(self.device)
+        try:
+            self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                           cache_dir=CACHE_PATH, _from_auto=True,
+                                                           trust_remote_code=True)
+            self.encoder = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                     cache_dir=CACHE_PATH, _from_auto=True,
+                                                     trust_remote_code=True).to(self.device)
+        except ConnectTimeout:
+            self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                           cache_dir=CACHE_PATH, _from_auto=True,
+                                                           trust_remote_code=True, local_files_only=True)
+            self.encoder = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                     cache_dir=CACHE_PATH, _from_auto=True,
+                                                     trust_remote_code=True, local_files_only=True).to(self.device)
+        except SSLError:
+            self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                           cache_dir=CACHE_PATH, _from_auto=True,
+                                                           trust_remote_code=True, local_files_only=True)
+            self.encoder = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
+                                                     cache_dir=CACHE_PATH, _from_auto=True,
+                                                     trust_remote_code=True, local_files_only=True).to(self.device)
         self.embed_dim = self.encoder.config.max_position_embeddings
         logger.debug(f'{Encoder.__name__} initialized on device: {self.device}.')
 
-    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
+    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, *args, **kwargs) -> torch.Tensor:
         def _encoder(_input_tup: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
             return self.encoder.forward(_input_tup[0], attention_mask=_input_tup[1]).last_hidden_state[:, 0, :]
 
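For readers following along, here is a minimal standalone sketch of the fallback pattern introduced above: load from the Hub when possible, and fall back to the local Hugging Face cache on `ConnectTimeout` or `SSLError`. The model name and cache directory below are placeholders, not values taken from the package.

```python
# Hedged sketch of the offline-fallback loading pattern (not the deeplotx API itself).
from requests.exceptions import ConnectTimeout, SSLError
from transformers import AutoModel, AutoTokenizer

MODEL_NAME = 'bert-base-uncased'   # placeholder checkpoint
CACHE_DIR = './model_cache'        # placeholder cache directory


def load_tokenizer_and_model(model_name: str = MODEL_NAME, cache_dir: str = CACHE_DIR):
    try:
        # Online path: download or revalidate against the Hub.
        tok = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir, trust_remote_code=True)
        mdl = AutoModel.from_pretrained(model_name, cache_dir=cache_dir, trust_remote_code=True)
    except (ConnectTimeout, SSLError):
        # Offline path: reuse whatever is already cached locally.
        tok = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir,
                                            trust_remote_code=True, local_files_only=True)
        mdl = AutoModel.from_pretrained(model_name, cache_dir=cache_dir,
                                        trust_remote_code=True, local_files_only=True)
    return tok, mdl
```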
@@ -15,16 +15,21 @@ logger = logging.getLogger('deeplotx.embedding')
 class LongTextEncoder(Encoder):
     def __init__(self, max_length: int, chunk_size: int = 448,
                  overlapping: int = 32, model_name_or_path: str = DEFAULT_BERT,
-                 cache_capacity: int = 64, device: str | None = None):
+                 cache_capacity: int = 64, max_workers: int = 8, device: str | None = None):
         super().__init__(model_name_or_path=model_name_or_path, device=device)
         self._max_length = max_length
         self._chunk_size = chunk_size
         self._overlapping = overlapping
         self._cache = LRUCache(capacity=cache_capacity)
+        self._worker_group = ThreadPool(max_workers=max_workers)
 
     def __chunk_embedding(self, idx: int, x: torch.Tensor, mask: torch.Tensor) -> tuple[int, torch.Tensor]:
         return idx, super().forward(x, attention_mask=mask)
 
+    @override
+    def forward(self, text: str, flatten: bool = False, *args, **kwargs) -> torch.Tensor:
+        return self.encode(text=text, flatten=flatten)
+
     @override
     def encode(self, text: str, flatten: bool = False) -> torch.Tensor:
         def postprocess(tensors: list[torch.Tensor], _flatten: bool) -> torch.Tensor:
@@ -59,7 +64,7 @@ class LongTextEncoder(Encoder):
             _tmp_right = (i + 1) * self._chunk_size + self._overlapping
             chunks.append((i, torch.tensor([_text_to_input_ids[_tmp_left: _tmp_right]], dtype=torch.int, device=self.device),
                            torch.tensor([_text_to_input_ids_att_mask[_tmp_left: _tmp_right]], dtype=torch.int, device=self.device)))
-        embeddings = list(ThreadPool(max_workers=min(num_chunks + 1, 8)).map(self.__chunk_embedding, chunks))
+        embeddings = list(self._worker_group.map(self.__chunk_embedding, chunks))
         embeddings = sorted([x.returns for x in embeddings], key=lambda x: x[0], reverse=False)
         fin_embedding = [x[1] for x in embeddings]
         # write cache
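A hedged usage sketch of the updated constructor: the thread pool is now created once per encoder (sized by the new `max_workers` argument) instead of per `encode()` call, and `forward()` simply delegates to `encode()`. Argument values below are illustrative.

```python
from deeplotx import LongTextEncoder

encoder = LongTextEncoder(
    max_length=2048,   # truncate text beyond this many tokens
    chunk_size=448,    # tokens per chunk
    overlapping=32,    # overlap between neighbouring chunks
    max_workers=8,     # new in 0.6.x: size of the persistent chunk-embedding pool
)
embedding = encoder.encode('some very long text ...', flatten=False)
same_embedding = encoder.forward('some very long text ...', flatten=False)  # delegates to encode()
```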
deeplotx/nn/__init__.py CHANGED
@@ -1,3 +1,5 @@
+from .base_neural_network import BaseNeuralNetwork
+from .feed_forward import FeedForward
 from .linear_regression import LinearRegression
 from .logistic_regression import LogisticRegression
 from .softmax_regression import SoftmaxRegression
@@ -5,8 +5,11 @@ from deeplotx.nn import RecursiveSequential
 
 class AutoRegression(RecursiveSequential):
     def __init__(self, feature_dim: int, hidden_dim: int | None = None,
-                 recursive_layers: int = 2, model_name: str | None = None,
+                 recursive_layers: int = 2, ffn_layers: int = 1, ffn_expansion_factor: int | float = 2,
+                 ffn_bias: bool = True, ffn_dropout_rate: float = 0.05, model_name: str | None = None,
                  device: str | None = None, dtype: torch.dtype | None = None):
         super().__init__(input_dim=feature_dim, output_dim=feature_dim,
                          hidden_dim=hidden_dim, recursive_layers=recursive_layers,
+                         ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor,
+                         ffn_bias=ffn_bias, ffn_dropout_rate=ffn_dropout_rate,
                          model_name=model_name, device=device, dtype=dtype)
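The new `ffn_*` keywords are passed straight through to `RecursiveSequential`. A hedged construction example with illustrative dimensions:

```python
from deeplotx.nn.auto_regression import AutoRegression

model = AutoRegression(
    feature_dim=768,          # illustrative feature width
    recursive_layers=2,
    ffn_layers=1,             # depth of the feed-forward stack after the LSTM
    ffn_expansion_factor=2,   # hidden-width multiplier inside each FFN unit
    ffn_bias=True,
    ffn_dropout_rate=0.05,
)
```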
@@ -8,7 +8,8 @@ DEFAULT_SUFFIX = 'dlx'
 
 
 class BaseNeuralNetwork(nn.Module):
-    def __init__(self, model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+    def __init__(self, in_features: int, out_features: int, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
         super().__init__()
         self._model_name = model_name \
             if model_name is not None \
@@ -16,6 +17,16 @@ class BaseNeuralNetwork(nn.Module):
         self.device = torch.device(device) if device is not None \
             else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.dtype = dtype if dtype is not None else torch.float32
+        self._in_features = in_features
+        self._out_features = out_features
+
+    @property
+    def in_features(self) -> int:
+        return self._in_features
+
+    @property
+    def out_features(self) -> int:
+        return self._out_features
 
     @staticmethod
     def ensure_device_and_dtype(x: torch.Tensor, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
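Subclasses now declare their input/output widths up front, which is what lets callers inspect `model.in_features` without knowing the concrete layer layout (the trainer change below relies on this). A hedged sketch of a minimal, hypothetical subclass:

```python
import torch
from torch import nn

from deeplotx.nn.base_neural_network import BaseNeuralNetwork


class TinyHead(BaseNeuralNetwork):  # hypothetical example subclass, not part of deeplotx
    def __init__(self, input_dim: int, output_dim: int):
        super().__init__(in_features=input_dim, out_features=output_dim)
        self.proj = nn.Linear(input_dim, output_dim, device=self.device, dtype=self.dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
        return self.proj(x)


head = TinyHead(16, 2)
assert head.in_features == 16 and head.out_features == 2
```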
@@ -0,0 +1,53 @@
+from typing_extensions import override
+
+import torch
+from torch import nn
+
+from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+
+
+class FeedForwardUnit(BaseNeuralNetwork):
+    def __init__(self, feature_dim: int, expansion_factor: int | float = 2,
+                 bias: bool = True, dropout_rate: float = 0.05, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name, device=device, dtype=dtype)
+        self._dropout_rate = dropout_rate
+        self.fc1 = nn.Linear(feature_dim, int(feature_dim * expansion_factor), bias=bias,
+                             device=self.device, dtype=self.dtype)
+        self.fc2 = nn.Linear(int(feature_dim * expansion_factor), feature_dim, bias=bias,
+                             device=self.device, dtype=self.dtype)
+        self.parametric_relu_1 = nn.PReLU(num_parameters=1, init=5e-3,
+                                          device=self.device, dtype=self.dtype)
+        self.layer_norm = nn.LayerNorm(normalized_shape=self.fc1.in_features, eps=1e-9,
+                                       device=self.device, dtype=self.dtype)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        residual = x
+        x = self.layer_norm(x)
+        x = self.fc1(x)
+        x = self.parametric_relu_1(x)
+        if self._dropout_rate > .0:
+            x = torch.dropout(x, p=self._dropout_rate, train=self.training)
+        return self.fc2(x) + residual
+
+
+class FeedForward(BaseNeuralNetwork):
+    def __init__(self, feature_dim: int, num_layers: int = 1, expansion_factor: int | float = 2,
+                 bias: bool = True, dropout_rate: float = 0.05, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
+        if num_layers < 1:
+            raise ValueError('num_layers cannot be less than 1.')
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name, device=device, dtype=dtype)
+        self.ffn_layers = nn.ModuleList([FeedForwardUnit(feature_dim=feature_dim,
+                                                         expansion_factor=expansion_factor, bias=bias,
+                                                         dropout_rate=dropout_rate,
+                                                         device=self.device, dtype=self.dtype)] * num_layers)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        for ffn in self.ffn_layers:
+            x = ffn(x)
+        return x
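A short usage sketch of the new module. Each `FeedForwardUnit` is a pre-norm residual block (LayerNorm → expand → PReLU → dropout → contract → add). Note that `nn.ModuleList([FeedForwardUnit(...)] * num_layers)` repeats the same module object, so in this release the stacked units share one set of weights; that is standard PyTorch behaviour when a list repeats a module instance.

```python
import torch

from deeplotx.nn.feed_forward import FeedForward

ffn = FeedForward(feature_dim=256, num_layers=2, expansion_factor=2, dropout_rate=0.05)
x = torch.randn(4, 256)      # (batch, feature_dim); shape is illustrative
y = ffn(x)                   # residual blocks preserve the feature width
assert y.shape == x.shape
```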
@@ -4,34 +4,22 @@ import torch
 from torch import nn
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.feed_forward import FeedForward
 
 
 class LinearRegression(BaseNeuralNetwork):
-    def __init__(self, input_dim: int, output_dim: int, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(model_name=model_name, device=device, dtype=dtype)
-        self.fc1 = nn.Linear(input_dim, 1024, device=self.device, dtype=self.dtype)
-        self.fc1_to_fc4_res = nn.Linear(1024, 64, device=self.device, dtype=self.dtype)
-        self.fc2 = nn.Linear(1024, 768, device=self.device, dtype=self.dtype)
-        self.fc3 = nn.Linear(768, 128, device=self.device, dtype=self.dtype)
-        self.fc4 = nn.Linear(128, 64, device=self.device, dtype=self.dtype)
-        self.fc5 = nn.Linear(64, output_dim, device=self.device, dtype=self.dtype)
-        self.parametric_relu_1 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_2 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_3 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_4 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
+    def __init__(self, input_dim: int, output_dim: int, num_layers: int = 1,
+                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
+        self.ffn = FeedForward(feature_dim=input_dim, num_layers=num_layers, expansion_factor=expansion_factor,
+                               bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
+        self.proj = nn.Linear(in_features=input_dim, out_features=output_dim,
+                              bias=bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-        fc1_out = self.parametric_relu_1(self.fc1(x))
-        x = nn.LayerNorm(normalized_shape=1024, eps=1e-9, device=self.device, dtype=self.dtype)(fc1_out)
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_2(self.fc2(x))
-        x = nn.LayerNorm(normalized_shape=768, eps=1e-9, device=self.device, dtype=self.dtype)(x)
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_3(self.fc3(x))
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_4(self.fc4(x)) + self.fc1_to_fc4_res(fc1_out)
-        x = self.fc5(x)
-        return x
+        residual = x
+        x = self.ffn(x) + residual
+        return self.proj(x)
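`LinearRegression` is now a residual `FeedForward` stack followed by a single output projection, with capacity controlled by the new keyword arguments rather than the old fixed 1024→768→128→64 layout. A hedged example:

```python
import torch

from deeplotx.nn.linear_regression import LinearRegression

model = LinearRegression(input_dim=768, output_dim=1,
                         num_layers=2, expansion_factor=1.5,
                         bias=True, dropout_rate=0.1)
out = model(torch.randn(8, 768))   # -> tensor of shape (8, 1)
```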
@@ -6,9 +6,12 @@ from deeplotx.nn.linear_regression import LinearRegression
 
 
 class LogisticRegression(LinearRegression):
-    def __init__(self, input_dim: int, output_dim: int = 1, model_name: str | None = None,
+    def __init__(self, input_dim: int, output_dim: int = 1, num_layers: int = 1, expansion_factor: int | float = 1.5,
+                 bias: bool = True, dropout_rate: float = 0.1, model_name: str | None = None,
                  device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(input_dim=input_dim, output_dim=output_dim, model_name=model_name, device=device, dtype=dtype)
+        super().__init__(input_dim=input_dim, output_dim=output_dim, num_layers=num_layers,
+                         expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
+                         model_name=model_name, device=device, dtype=dtype)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -5,8 +5,11 @@ from deeplotx.nn import LongContextRecursiveSequential
 
 class LongContextAutoRegression(LongContextRecursiveSequential):
     def __init__(self, feature_dim: int, hidden_dim: int | None = None,
-                 recursive_layers: int = 2, model_name: str | None = None,
+                 recursive_layers: int = 2, ffn_layers: int = 1, ffn_expansion_factor: int | float = 2,
+                 ffn_bias: bool = True, ffn_dropout_rate: float = 0.05, model_name: str | None = None,
                  device: str | None = None, dtype: torch.dtype | None = None):
         super().__init__(input_dim=feature_dim, output_dim=feature_dim,
                          hidden_dim=hidden_dim, recursive_layers=recursive_layers,
+                         ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor,
+                         ffn_bias=ffn_bias, ffn_dropout_rate=ffn_dropout_rate,
                          model_name=model_name, device=device, dtype=dtype)
@@ -10,19 +10,25 @@ from deeplotx.nn.self_attention import SelfAttention
 class LongContextRecursiveSequential(RecursiveSequential):
     def __init__(self, input_dim: int, output_dim: int,
                  hidden_dim: int | None = None, recursive_layers: int = 2,
+                 ffn_layers: int = 1, ffn_expansion_factor: int | float = 2,
+                 ffn_bias: bool = True, ffn_dropout_rate: float = 0.05,
                  model_name: str | None = None, device: str | None = None,
-                 dtype: torch.dtype | None = None):
+                 dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim,
                          hidden_dim=hidden_dim, recursive_layers=recursive_layers,
+                         ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor,
+                         ffn_bias=ffn_bias, ffn_dropout_rate=ffn_dropout_rate,
                          model_name=model_name, device=device, dtype=dtype)
-        self._feature_dim = input_dim
-        self.self_attention = SelfAttention(feature_dim=input_dim)
-        self.proj = nn.Linear(in_features=input_dim * 2, out_features=input_dim,
-                              bias=True, device=self.device, dtype=self.dtype)
+        self.self_attention = SelfAttention(feature_dim=input_dim, bias=kwargs.get('attn_proj_bias', ffn_bias),
+                                            proj_layers=kwargs.get('attn_proj_layers', 1),
+                                            proj_expansion_factor=kwargs.get('attn_proj_expansion_factor', ffn_expansion_factor),
+                                            dropout_rate=kwargs.get('attn_proj_dropout_rate', ffn_dropout_rate))
+        self.__proj = nn.Linear(in_features=input_dim * 2, out_features=input_dim,
+                                bias=ffn_bias, device=self.device, dtype=self.dtype)
 
     @override
     def forward(self, x: torch.Tensor, state: tuple[torch.Tensor, torch.Tensor]) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
         x = torch.cat([self.self_attention(x), x], dim=-1)
         x = nn.LayerNorm(normalized_shape=x.shape[-1], eps=1e-9, device=self.device, dtype=self.dtype)(x)
-        return super().forward(self.proj(x), state)
+        return super().forward(self.__proj(x), state)
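Attention-projection behaviour is now tunable through `**kwargs` (`attn_proj_layers`, `attn_proj_bias`, `attn_proj_expansion_factor`, `attn_proj_dropout_rate`), each defaulting to the corresponding `ffn_*` value when omitted. A hedged construction sketch with illustrative dimensions:

```python
from deeplotx.nn.long_context_recursive_sequential import LongContextRecursiveSequential

model = LongContextRecursiveSequential(
    input_dim=768, output_dim=1,
    hidden_dim=256, recursive_layers=2,
    ffn_layers=2, ffn_expansion_factor=2, ffn_bias=True, ffn_dropout_rate=0.05,
    # optional attention-projection overrides; fall back to the ffn_* values if omitted
    attn_proj_layers=1, attn_proj_expansion_factor=1.5, attn_proj_dropout_rate=0.02,
)
```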
@@ -4,23 +4,27 @@ import torch
 from torch import nn
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn import LinearRegression
+from deeplotx.nn.feed_forward import FeedForward
 
 
 class RecursiveSequential(BaseNeuralNetwork):
     def __init__(self, input_dim: int, output_dim: int,
                  hidden_dim: int | None = None, recursive_layers: int = 2,
+                 ffn_layers: int = 1, ffn_expansion_factor: int | float = 2,
+                 ffn_bias: bool = True, ffn_dropout_rate: float = 0.05,
                  model_name: str | None = None, device: str | None = None,
                  dtype: torch.dtype | None = None):
-        super().__init__(model_name=model_name, device=device, dtype=dtype)
+        super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
         if hidden_dim is None:
             hidden_dim = input_dim
         self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim,
                             num_layers=recursive_layers, batch_first=True,
                             bias=True, bidirectional=True, device=self.device,
                             dtype=self.dtype)
-        self.regressive_head = LinearRegression(input_dim=hidden_dim * 2, output_dim=output_dim,
-                                                device=self.device, dtype=self.dtype)
+        self.ffn = FeedForward(feature_dim=hidden_dim * 2, num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
+                               bias=ffn_bias, dropout_rate=ffn_dropout_rate, device=self.device, dtype=self.dtype)
+        self.__proj = nn.Linear(in_features=hidden_dim * 2, out_features=output_dim, bias=ffn_bias,
+                                device=self.device, dtype=self.dtype)
 
     def initial_state(self, batch_size: int = 1) -> tuple[torch.Tensor, torch.Tensor]:
         zeros = torch.zeros(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size, device=self.device, dtype=self.dtype)
@@ -32,7 +36,10 @@ class RecursiveSequential(BaseNeuralNetwork):
         state = (self.ensure_device_and_dtype(state[0], device=self.device, dtype=self.dtype),
                  self.ensure_device_and_dtype(state[1], device=self.device, dtype=self.dtype))
         x, (hidden_state, cell_state) = self.lstm(x, state)
-        x = self.regressive_head(x[:, -1, :])
+        x = x[:, -1, :]
+        residual = x
+        x = self.ffn(x) + residual
+        x = self.__proj(x)
         return x, (hidden_state, cell_state)
 
     @override
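The stateful interface is unchanged; only the head differs (a `FeedForward` block with a residual connection plus a private projection, instead of the old `LinearRegression`). A hedged end-to-end sketch of one forward pass, with illustrative shapes:

```python
import torch

from deeplotx.nn.recursive_sequential import RecursiveSequential

model = RecursiveSequential(input_dim=64, output_dim=1, hidden_dim=128,
                            recursive_layers=2, ffn_layers=1)
x = torch.randn(4, 10, 64)                  # (batch, seq_len, input_dim)
state = model.initial_state(batch_size=4)   # (h0, c0) for the bidirectional LSTM
y, state = model(x, state)                  # y has shape (4, 1)
```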
@@ -1,29 +1,34 @@
 from typing_extensions import override
 
 import torch
-from torch import nn, softmax
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.feed_forward import FeedForward
 
 
 class SelfAttention(BaseNeuralNetwork):
-    def __init__(self, feature_dim: int, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(model_name=model_name, device=device, dtype=dtype)
+    def __init__(self, feature_dim: int, bias: bool = True, proj_layers: int = 1,
+                 proj_expansion_factor: int | float = 1.5, dropout_rate: float = 0.02,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name,
+                         device=device, dtype=dtype)
         self._feature_dim = feature_dim
-        self.q_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
-        self.k_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
-        self.v_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
+        self.q_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
+        self.k_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
+        self.v_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
 
     def _attention(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
         q, k = self.q_proj(x), self.k_proj(x)
         attn = torch.matmul(q, k.transpose(-2, -1))
         attn = attn / (self._feature_dim ** 0.5)
         attn = attn.masked_fill(mask == 0, -1e9) if mask is not None else attn
-        return softmax(attn, dim=-1)
+        return torch.softmax(attn, dim=-1)
 
     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
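A hedged usage sketch, assuming `x` is `(batch, seq_len, feature_dim)`, the optional mask broadcasts to `(batch, seq_len, seq_len)` with 0 marking masked positions, and `forward()` applies the attention weights to `v_proj(x)` so the output keeps the shape of `x`:

```python
import torch

from deeplotx.nn.self_attention import SelfAttention

attn = SelfAttention(feature_dim=64, proj_layers=1, proj_expansion_factor=1.5, dropout_rate=0.02)
x = torch.randn(2, 12, 64)                     # (batch, seq_len, feature_dim)
mask = torch.ones(2, 12, 12, dtype=torch.int)  # 1 = attend, 0 = masked out
out = attn(x, mask=mask)                       # expected to keep x's shape
```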
@@ -6,9 +6,12 @@ from deeplotx.nn.linear_regression import LinearRegression
 
 
 class SoftmaxRegression(LinearRegression):
-    def __init__(self, input_dim: int, output_dim: int, model_name: str | None = None,
+    def __init__(self, input_dim: int, output_dim: int, num_layers: int = 1, expansion_factor: int | float = 1.5,
+                 bias: bool = True, dropout_rate: float = 0.1, model_name: str | None = None,
                  device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(input_dim=input_dim, output_dim=output_dim, model_name=model_name, device=device, dtype=dtype)
+        super().__init__(input_dim=input_dim, output_dim=output_dim, num_layers=num_layers,
+                         expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
+                         model_name=model_name, device=device, dtype=dtype)
 
     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -25,13 +25,13 @@ class TextBinaryClassifierTrainer(BaseTrainer):
               num_epochs: int, learning_rate: float = 2e-6, balancing_dataset: bool = True,
               train_loss_threshold: float = 0.0, valid_loss_threshold: float = 0.0,
               alpha: float = 1e-4, rho: float = 0.2,
-              hidden_dim: int = 256, recursive_layers: int = 2) -> LongContextRecursiveSequential:
+              hidden_dim: int = 256, recursive_layers: int = 2, **kwargs) -> LongContextRecursiveSequential:
         if balancing_dataset:
             min_length = min(len(positive_texts), len(negative_texts))
             positive_texts = positive_texts[:min_length]
             negative_texts = negative_texts[:min_length]
         all_texts = positive_texts + negative_texts
-        text_embeddings = [self._long_text_encoder.encode(x, flatten=False, use_cache=True) for x in all_texts]
+        text_embeddings = [self._long_text_encoder.encode(x, flatten=False) for x in all_texts]
         feature_dim = text_embeddings[0].shape[-1]
         dtype = text_embeddings[0].dtype
         labels = ([torch.tensor([1.], dtype=dtype, device=self.device) for _ in range(len(positive_texts))]
@@ -44,15 +44,27 @@ class TextBinaryClassifierTrainer(BaseTrainer):
         valid_dataset = TensorDataset(inputs[train_size:], labels[train_size:])
         self.train_dataset_loader = DataLoader(train_dataset, batch_size=self._batch_size, shuffle=True)
         self.valid_dataset_loader = DataLoader(valid_dataset, batch_size=self._batch_size, shuffle=True)
-
-        if self.model is not None and self.model.fc1.in_features != feature_dim:
+        if self.model is not None and self.model.in_features != feature_dim:
             logger.warning("The dimension of features doesn't match. A new model instance will be created.")
             self.model = None
         if self.model is None:
+            ffn_layers = kwargs.get('ffn_layers', 5)
+            ffn_expansion_factor = kwargs.get('ffn_expansion_factor', 2)
+            ffn_bias = kwargs.get('ffn_bias', True)
+            ffn_dropout_rate = kwargs.get('ffn_dropout_rate', 0.1)
             self.model = LongContextRecursiveSequential(input_dim=feature_dim, output_dim=1,
                                                         hidden_dim=hidden_dim,
                                                         recursive_layers=recursive_layers,
+                                                        ffn_layers=ffn_layers,
+                                                        ffn_expansion_factor=ffn_expansion_factor,
+                                                        ffn_bias=ffn_bias,
+                                                        ffn_dropout_rate=ffn_dropout_rate,
+                                                        attn_proj_layers=kwargs.get('attn_proj_layers', ffn_layers),
+                                                        attn_proj_bias=kwargs.get('attn_proj_bias', ffn_bias),
+                                                        attn_proj_expansion_factor=kwargs.get('attn_proj_expansion_factor', ffn_expansion_factor),
+                                                        attn_proj_dropout_rate=kwargs.get('attn_proj_dropout_rate', ffn_dropout_rate),
                                                         device=self.device, dtype=dtype)
+            logger.debug(f'Training Model: {self.model}')
         loss_function = nn.BCELoss()
         optimizer = optim.Adamax(self.model.parameters(), lr=learning_rate)
         for epoch in range(num_epochs):
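The trainer now threads `ffn_*` / `attn_proj_*` keyword arguments through to the model it builds, with `ffn_layers` defaulting to 5 and `ffn_dropout_rate` to 0.1 when unspecified. A hedged call sketch; `trainer` is assumed to be an already constructed `TextBinaryClassifierTrainer`, and the leading positional arguments are inferred from the signature fragment above rather than verified:

```python
# Sketch only: keyword names mirror the kwargs.get(...) calls in the diff above.
positive_texts = ['a relevant document ...']     # illustrative data
negative_texts = ['an irrelevant document ...']

model = trainer.train(
    positive_texts, negative_texts, num_epochs=30, learning_rate=2e-5,
    hidden_dim=256, recursive_layers=2,
    ffn_layers=5, ffn_expansion_factor=2, ffn_bias=True, ffn_dropout_rate=0.1,
    attn_proj_layers=5, attn_proj_expansion_factor=2, attn_proj_dropout_rate=0.1,
)
```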
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deeplotx
-Version: 0.5.5
+Version: 0.6.1
 Summary: Easy-2-use long text NLP toolkit.
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
@@ -163,6 +163,8 @@ Dynamic: license-file
 
 ```python
 from deeplotx import (
+    BaseNeuralNetwork,  # base class for deep neural networks
+    FeedForward,  # feed-forward network
     LinearRegression,  # linear regression
     LogisticRegression,  # logistic regression / binary classification / multi-label classification
     SoftmaxRegression,  # Softmax regression / multi-class classification
@@ -181,38 +183,54 @@ Dynamic: license-file
 
 import torch
 from torch import nn
-
+
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-
-
-class LinearRegression(BaseNeuralNetwork):
-    def __init__(self, input_dim: int, output_dim: int, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(model_name=model_name, device=device, dtype=dtype)
-        self.fc1 = nn.Linear(input_dim, 1024, device=self.device, dtype=self.dtype)
-        self.fc1_to_fc4_res = nn.Linear(1024, 64, device=self.device, dtype=self.dtype)
-        self.fc2 = nn.Linear(1024, 768, device=self.device, dtype=self.dtype)
-        self.fc3 = nn.Linear(768, 128, device=self.device, dtype=self.dtype)
-        self.fc4 = nn.Linear(128, 64, device=self.device, dtype=self.dtype)
-        self.fc5 = nn.Linear(64, output_dim, device=self.device, dtype=self.dtype)
-        self.parametric_relu_1 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_2 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_3 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-        self.parametric_relu_4 = nn.PReLU(num_parameters=1, init=5e-3, device=self.device, dtype=self.dtype)
-
+
+
+class FeedForwardUnit(BaseNeuralNetwork):
+    def __init__(self, feature_dim: int, expansion_factor: int | float = 2,
+                 bias: bool = True, dropout_rate: float = 0.05, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name, device=device, dtype=dtype)
+        self._dropout_rate = dropout_rate
+        self.fc1 = nn.Linear(feature_dim, int(feature_dim * expansion_factor), bias=bias,
+                             device=self.device, dtype=self.dtype)
+        self.fc2 = nn.Linear(int(feature_dim * expansion_factor), feature_dim, bias=bias,
+                             device=self.device, dtype=self.dtype)
+        self.parametric_relu_1 = nn.PReLU(num_parameters=1, init=5e-3,
+                                          device=self.device, dtype=self.dtype)
+        self.layer_norm = nn.LayerNorm(normalized_shape=self.fc1.in_features, eps=1e-9,
+                                       device=self.device, dtype=self.dtype)
+
     @override
-    def forward(self, x) -> torch.Tensor:
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
-        fc1_out = self.parametric_relu_1(self.fc1(x))
-        x = nn.LayerNorm(normalized_shape=1024, eps=1e-9, device=self.device, dtype=self.dtype)(fc1_out)
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_2(self.fc2(x))
-        x = nn.LayerNorm(normalized_shape=768, eps=1e-9, device=self.device, dtype=self.dtype)(x)
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_3(self.fc3(x))
-        x = torch.dropout(x, p=0.2, train=self.training)
-        x = self.parametric_relu_4(self.fc4(x)) + self.fc1_to_fc4_res(fc1_out)
-        x = self.fc5(x)
+        residual = x
+        x = self.layer_norm(x)
+        x = self.fc1(x)
+        x = self.parametric_relu_1(x)
+        if self._dropout_rate > .0:
+            x = torch.dropout(x, p=self._dropout_rate, train=self.training)
+        return self.fc2(x) + residual
+
+
+class FeedForward(BaseNeuralNetwork):
+    def __init__(self, feature_dim: int, num_layers: int = 1, expansion_factor: int | float = 2,
+                 bias: bool = True, dropout_rate: float = 0.05, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
+        if num_layers < 1:
+            raise ValueError('num_layers cannot be less than 1.')
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name, device=device, dtype=dtype)
+        self.ffn_layers = nn.ModuleList([FeedForwardUnit(feature_dim=feature_dim,
+                                                         expansion_factor=expansion_factor, bias=bias,
+                                                         dropout_rate=dropout_rate,
+                                                         device=self.device, dtype=self.dtype)] * num_layers)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        for ffn in self.ffn_layers:
+            x = ffn(x)
         return x
 ```
 
@@ -222,29 +240,34 @@ Dynamic: license-file
 from typing_extensions import override
 
 import torch
-from torch import nn, softmax
 
 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.feed_forward import FeedForward
 
 
 class SelfAttention(BaseNeuralNetwork):
-    def __init__(self, feature_dim: int, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(model_name=model_name, device=device, dtype=dtype)
+    def __init__(self, feature_dim: int, bias: bool = True, proj_layers: int = 1,
+                 proj_expansion_factor: int | float = 1.5, dropout_rate: float = 0.02,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name,
+                         device=device, dtype=dtype)
         self._feature_dim = feature_dim
-        self.q_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
-        self.k_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
-        self.v_proj = nn.Linear(in_features=self._feature_dim, out_features=self._feature_dim,
-                                bias=True, device=self.device, dtype=self.dtype)
+        self.q_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
+        self.k_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
+        self.v_proj = FeedForward(feature_dim=self._feature_dim, num_layers=proj_layers,
+                                  expansion_factor=proj_expansion_factor,
+                                  bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
 
     def _attention(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
         q, k = self.q_proj(x), self.k_proj(x)
         attn = torch.matmul(q, k.transpose(-2, -1))
         attn = attn / (self._feature_dim ** 0.5)
         attn = attn.masked_fill(mask == 0, -1e9) if mask is not None else attn
-        return softmax(attn, dim=-1)
+        return torch.softmax(attn, dim=-1)
 
     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
@@ -265,7 +288,8 @@ Dynamic: license-file
 long_text_encoder = LongTextEncoder(
     max_length=2048,  # maximum text length (in tokens); longer text is truncated
     chunk_size=448,  # chunk size (in tokens)
-    overlapping=32  # overlap between neighbouring chunks (in tokens)
+    overlapping=32,  # overlap between neighbouring chunks (in tokens)
+    cache_capacity=512  # cache capacity
 )
 
 trainer = TextBinaryClassifierTrainer(
@@ -0,0 +1,31 @@
+deeplotx/__init__.py,sha256=S0hLmRkHdoaxv7IPKVXh5Oat27pt_FGKGmKjp3aAyMU,1129
+deeplotx/encoder/__init__.py,sha256=BrsF5_4O-4pfihYF2wjExDOoAY-03kGJTH-Mhez4tsE,129
+deeplotx/encoder/encoder.py,sha256=oSBdA-MiwMKNfTFJWR-RdvNS0G0qfX-Qchwy4LuwB00,3985
+deeplotx/encoder/long_text_encoder.py,sha256=It0hXuSe0Hq5Y_3QhjEqvF1JbtX6Hc2VzVabzOu7fLA,3625
+deeplotx/encoder/longformer_encoder.py,sha256=A8FXqd4mdHxSn_o_R689XtpT73ISDT788EgMQRGLC2g,1822
+deeplotx/nn/__init__.py,sha256=f7f6Qx1Xkw3Nn3Lvafe7Pq4pUO7ZcESIA8KZxnSL_OM,535
+deeplotx/nn/auto_regression.py,sha256=8eEdXhOjRLKP4MpgX1wt9L1grU4_fS49ejVoNzFs7LM,877
+deeplotx/nn/base_neural_network.py,sha256=s7jHe7HprOelD1wZRbFdqb5Hxqs3sjLXLEo7OyDBHtk,3215
+deeplotx/nn/feed_forward.py,sha256=3lWV_snCp_PiqjxTYoiNlL9EF2heekWbMkKXoPlljkM,2839
+deeplotx/nn/linear_regression.py,sha256=QybSRfsf9PpgJAWixvrSNn3OYRKJXpSZMfqdzpw-Kd8,1280
+deeplotx/nn/logistic_regression.py,sha256=WfgHVNGIvAYsX2iea2wRlLgfbubYWyZkBLYpnpwOiyU,937
+deeplotx/nn/long_context_auto_regression.py,sha256=oMrxeVuCa1M2EQJSbOYlpTjl5NrkKGAHers8qIaZdU8,911
+deeplotx/nn/long_context_recursive_sequential.py,sha256=sU_22QH7Z6EJurMbTVEYPd83wC2dzadMIeztVIcc04I,2173
+deeplotx/nn/recursive_sequential.py,sha256=WsmXaIgTdpudo2bYcpBX8bKeJgPnT-atwEmLSXqQEco,2743
+deeplotx/nn/self_attention.py,sha256=HW9ZB3S6-yfTQc2745rJ6TM7L01P8ewxt7nGHosE2r8,2291
+deeplotx/nn/softmax_regression.py,sha256=PN_1Zr_B_z5zYC_s_8k6c5fllOtxfJEvVvCmC9GRmx0,958
+deeplotx/similarity/__init__.py,sha256=s3u-KSgxjnMcWpIItKgXNltFMPQ7YY3CqsqHI-5F1c8,724
+deeplotx/similarity/distribution.py,sha256=wQGouuuW531pZeBRKBujXsdsoz4fDnPw7_GW81jwepc,1066
+deeplotx/similarity/set.py,sha256=zhGFxtSIXlWqvipBYzoiPahp4g0boAIoUiMfG0wl07A,686
+deeplotx/similarity/vector.py,sha256=WVbDHqykt-fvuILVrhUCtIFAOEjY_zvttrXGM9eylG0,1125
+deeplotx/trainer/__init__.py,sha256=Fl5DR9UecQc5VtBcczU9sx_HtPNoFohpuELOh-Jrsks,77
+deeplotx/trainer/base_trainer.py,sha256=z0MeAT-rRYmjeBXt0ckt7J1itYArR0Cx02wHesXUoZE,385
+deeplotx/trainer/text_binary_classification_trainer.py,sha256=7oLzgXvdmFpQiBy7ncJ0smdqnMGr8xdZs6nTWpj6qfw,6085
+deeplotx/util/__init__.py,sha256=JxqAK_WOOHcYVSTHBT1-WuBwWrPEVDTV3titeVWvNUM,74
+deeplotx/util/hash.py,sha256=qbNU3RLBWGQYFVte9WZBAkZ1BkdjCXiKLDaKPN54KFk,662
+deeplotx/util/read_file.py,sha256=ptzouvEQeeW8KU5BrWNJlXw-vFXVrpS9SkAUxsu6A8A,612
+deeplotx-0.6.1.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
+deeplotx-0.6.1.dist-info/METADATA,sha256=a1KcBHaewfyOwIywZ3wtBr8mdly4ofdb7Z4g2KYVzUk,12251
+deeplotx-0.6.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+deeplotx-0.6.1.dist-info/top_level.txt,sha256=hKg4pVDXZ-WWxkRfJFczRIll1Sv7VyfKCmzHLXbuh1U,9
+deeplotx-0.6.1.dist-info/RECORD,,
@@ -1,30 +0,0 @@
-deeplotx/__init__.py,sha256=6El66QXHDrgNMsNIG9bG97WO8BhPK5btXbTikzx2ce4,1087
-deeplotx/encoder/__init__.py,sha256=BrsF5_4O-4pfihYF2wjExDOoAY-03kGJTH-Mhez4tsE,129
-deeplotx/encoder/encoder.py,sha256=p1e4Dx3-Ghdl0MGNalr0D_OnafwaJnbhscEDVq-y73A,2400
-deeplotx/encoder/long_text_encoder.py,sha256=GatkOF1QQHLtvyuikfCP4xpzfDvszJJyonaS9f7wSxg,3401
-deeplotx/encoder/longformer_encoder.py,sha256=A8FXqd4mdHxSn_o_R689XtpT73ISDT788EgMQRGLC2g,1822
-deeplotx/nn/__init__.py,sha256=CS0UwyYKa8wI6vu6FBIYxvm-HAmw39MTMFlZDtqi6UA,444
-deeplotx/nn/auto_regression.py,sha256=7P63opWCWMqE2DigwbsL6kfXtFtJPz00Yo1RqflBz4A,572
-deeplotx/nn/base_neural_network.py,sha256=o9s0NqxkDcFZdipX8UrlbBmwYHOg7wPmzbjBEeGw63s,2902
-deeplotx/nn/linear_regression.py,sha256=7TbbplBgY70b1l5lKvTJMzDWQ8khQfnRCyMjObhVdEc,2180
-deeplotx/nn/logistic_regression.py,sha256=YiSLAon8gLDtMXAkPQ210sauod24eyJYYH50fPhj6T8,667
-deeplotx/nn/long_context_auto_regression.py,sha256=Z67Enq1kc1bERIrQW4jHeDQQmisOXhhjrtaPklnHkyw,605
-deeplotx/nn/long_context_recursive_sequential.py,sha256=_fKpPA7wt6B0kPyyig4xuhmLxygK19FSLgxW1Xa453M,1487
-deeplotx/nn/recursive_sequential.py,sha256=8YHZ-IdLyMJN5QVWPMuizDxLodAE9Bgdg1_YtIxFw7o,2247
-deeplotx/nn/self_attention.py,sha256=fb34wXnfgAGYJEhqa1l9AxMa-AHcCTOLbUlAfaGIK7Q,1766
-deeplotx/nn/softmax_regression.py,sha256=BeVk0G2H3zKG6bsQgPRNWuTxnnNmVI2zFZtCHgARAAc,688
-deeplotx/similarity/__init__.py,sha256=s3u-KSgxjnMcWpIItKgXNltFMPQ7YY3CqsqHI-5F1c8,724
-deeplotx/similarity/distribution.py,sha256=wQGouuuW531pZeBRKBujXsdsoz4fDnPw7_GW81jwepc,1066
-deeplotx/similarity/set.py,sha256=zhGFxtSIXlWqvipBYzoiPahp4g0boAIoUiMfG0wl07A,686
-deeplotx/similarity/vector.py,sha256=WVbDHqykt-fvuILVrhUCtIFAOEjY_zvttrXGM9eylG0,1125
-deeplotx/trainer/__init__.py,sha256=Fl5DR9UecQc5VtBcczU9sx_HtPNoFohpuELOh-Jrsks,77
-deeplotx/trainer/base_trainer.py,sha256=z0MeAT-rRYmjeBXt0ckt7J1itYArR0Cx02wHesXUoZE,385
-deeplotx/trainer/text_binary_classification_trainer.py,sha256=BNBQdpaD8nB1dQv8naHNIravNcQC8JjOMqD-WRSrUH0,4931
-deeplotx/util/__init__.py,sha256=JxqAK_WOOHcYVSTHBT1-WuBwWrPEVDTV3titeVWvNUM,74
-deeplotx/util/hash.py,sha256=qbNU3RLBWGQYFVte9WZBAkZ1BkdjCXiKLDaKPN54KFk,662
-deeplotx/util/read_file.py,sha256=ptzouvEQeeW8KU5BrWNJlXw-vFXVrpS9SkAUxsu6A8A,612
-deeplotx-0.5.5.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
-deeplotx-0.5.5.dist-info/METADATA,sha256=QE1R1jodTrnPFY7cbu4mQNPt8_BgKNJuHoSDswopueo,10880
-deeplotx-0.5.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-deeplotx-0.5.5.dist-info/top_level.txt,sha256=hKg4pVDXZ-WWxkRfJFczRIll1Sv7VyfKCmzHLXbuh1U,9
-deeplotx-0.5.5.dist-info/RECORD,,