deeplotx 0.8.3__py3-none-any.whl → 0.8.6__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
deeplotx/__init__.py CHANGED
@@ -6,6 +6,7 @@ __ROOT__ = os.path.dirname(os.path.abspath(__file__))
 from .encoder import Encoder, LongTextEncoder, LongformerEncoder
 from .nn import (
     FeedForward,
+    MultiHeadFeedForward,
     LinearRegression,
     LogisticRegression,
     SoftmaxRegression,
deeplotx/encoder/encoder.py CHANGED
@@ -48,7 +48,7 @@ class Encoder(nn.Module):
             return self.encoder.forward(_input_tup[0], attention_mask=_input_tup[1]).last_hidden_state[:, 0, :]

         num_chunks = math.ceil(input_ids.shape[-1] / self.embed_dim)
-        chunks = chunk_results = []
+        chunks, chunk_results = [], []
         for i in range(num_chunks):
             start_idx = i * self.embed_dim
             end_idx = min(start_idx + self.embed_dim, input_ids.shape[-1])
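The one-line fix in this hunk addresses a classic Python aliasing bug: chained assignment binds both names to the *same* list object, so every append to `chunks` was also visible through `chunk_results`. A minimal standalone sketch of the difference:

```python
# Chained assignment: both names point at ONE list object.
chunks = chunk_results = []
chunks.append('x')
print(chunk_results)  # ['x'] -- unintended aliasing (the 0.8.3 behavior)

# Tuple assignment: two independent lists (the 0.8.6 fix).
chunks, chunk_results = [], []
chunks.append('x')
print(chunk_results)  # []
```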
deeplotx/nn/__init__.py CHANGED
@@ -1,5 +1,6 @@
 from .base_neural_network import BaseNeuralNetwork
 from .feed_forward import FeedForward
+from .multi_head_feed_forward import MultiHeadFeedForward
 from .linear_regression import LinearRegression
 from .logistic_regression import LogisticRegression
 from .softmax_regression import SoftmaxRegression
deeplotx/nn/auto_regression.py CHANGED
@@ -7,8 +7,8 @@ class AutoRegression(RecursiveSequential):
     def __init__(self, feature_dim: int, bias: bool = True,
                  recursive_layers: int = 1, recursive_hidden_dim: int | None = None,
                  ffn_layers: int = 1, ffn_expansion_factor: int | float = 2, dropout_rate: float = 0.05,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=feature_dim, output_dim=feature_dim, bias=bias,
                          recursive_layers=recursive_layers, recursive_hidden_dim=recursive_hidden_dim,
                          ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor,
-                         dropout_rate=dropout_rate, model_name=model_name, device=device, dtype=dtype)
+                         dropout_rate=dropout_rate, model_name=model_name, device=device, dtype=dtype, **kwargs)
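`AutoRegression` now forwards `**kwargs` to `RecursiveSequential`, so the new multi-head options reach the output head without a signature change. A hedged usage sketch (dimension values are illustrative; `ffn_heads` and `ffn_head_layers` are the keys `RecursiveSequential` reads in its hunk below):

```python
from deeplotx.nn.auto_regression import AutoRegression

# ffn_heads / ffn_head_layers ride through **kwargs down to the
# LinearRegression output head inside RecursiveSequential.
model = AutoRegression(feature_dim=256, recursive_layers=2,
                       ffn_layers=2, ffn_heads=4, ffn_head_layers=1)
```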
deeplotx/nn/linear_regression.py CHANGED
@@ -4,22 +4,26 @@ import torch
 from torch import nn

 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.feed_forward import FeedForward
+from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward


 class LinearRegression(BaseNeuralNetwork):
-    def __init__(self, input_dim: int, output_dim: int, num_layers: int = 1,
+    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
                  expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name, device=device, dtype=dtype)
-        self.ffn = FeedForward(feature_dim=input_dim, num_layers=num_layers, expansion_factor=expansion_factor,
-                               bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)
-        self.proj = nn.Linear(in_features=input_dim, out_features=output_dim,
-                              bias=bias, device=self.device, dtype=self.dtype)
+        self.multi_head_ffn_layers = nn.ModuleList([MultiHeadFeedForward(feature_dim=input_dim, num_heads=num_heads,
+                                                                         num_layers=kwargs.get('head_layers', 1),
+                                                                         expansion_factor=expansion_factor,
+                                                                         bias=bias, dropout_rate=dropout_rate,
+                                                                         device=self.device, dtype=self.dtype) for _ in range(num_layers)])
+        self.out_proj = nn.Linear(in_features=input_dim, out_features=output_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)

     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
         residual = x
-        x = self.ffn(x) + residual
-        return self.proj(x)
+        for ffn in self.multi_head_ffn_layers:
+            x = ffn(x)
+        return self.out_proj(x + residual)
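Note the behavioral change here: `LinearRegression` now stacks `num_layers` `MultiHeadFeedForward` blocks, and the residual connection skips the whole stack rather than a single FFN. A hedged sketch of the new surface (`head_layers` arrives via `**kwargs`; the dimensions are illustrative):

```python
import torch
from deeplotx.nn.linear_regression import LinearRegression

# Two stacked multi-head FFN blocks, four heads each; head_layers
# controls the depth of the FeedForward inside every head.
model = LinearRegression(input_dim=64, output_dim=1,
                         num_heads=4, num_layers=2, head_layers=1)
y = model(torch.randn(8, 64))  # residual skips the whole stack -> (8, 1)
```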
deeplotx/nn/logistic_regression.py CHANGED
@@ -6,12 +6,12 @@ from deeplotx.nn.linear_regression import LinearRegression


 class LogisticRegression(LinearRegression):
-    def __init__(self, input_dim: int, output_dim: int = 1, num_layers: int = 1, expansion_factor: int | float = 1.5,
-                 bias: bool = True, dropout_rate: float = 0.1, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(input_dim=input_dim, output_dim=output_dim, num_layers=num_layers,
+    def __init__(self, input_dim: int, output_dim: int = 1, num_heads: int = 1, num_layers: int = 1,
+                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
+        super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)

     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
deeplotx/nn/long_context_recursive_sequential.py CHANGED
@@ -12,12 +12,11 @@ class LongContextRecursiveSequential(RecursiveSequential):
     def __init__(self, input_dim: int, output_dim: int, bias: bool = True,
                  encoder_layers: int = 1, attn_heads: int = 1, recursive_layers: int = 2, recursive_hidden_dim: int | None = None,
                  ffn_layers: int = 1, ffn_expansion_factor: int | float = 2, dropout_rate: float = 0.05,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None,
-                 **kwargs):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(input_dim=input_dim, output_dim=output_dim, bias=bias,
                          recursive_layers=recursive_layers, recursive_hidden_dim=recursive_hidden_dim,
                          ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)
         self.roformer_encoders = nn.ModuleList([RoFormerEncoder(feature_dim=input_dim, attn_heads=attn_heads, bias=bias,
                                                                 ffn_layers=kwargs.get('encoder_ffn_layers', ffn_layers),
                                                                 ffn_expansion_factor=kwargs.get('encoder_expansion_factor', ffn_expansion_factor),
deeplotx/nn/multi_head_feed_forward.py ADDED
@@ -0,0 +1,32 @@
+from typing_extensions import override
+
+import torch
+from torch import nn
+
+from deeplotx.nn.base_neural_network import BaseNeuralNetwork
+from deeplotx.nn.feed_forward import FeedForward
+
+
+class MultiHeadFeedForward(BaseNeuralNetwork):
+    def __init__(self, feature_dim: int, num_heads: int = 1, num_layers: int = 1, expansion_factor: int | float = 2,
+                 bias: bool = True, dropout_rate: float = 0.05, model_name: str | None = None,
+                 device: str | None = None, dtype: torch.dtype | None = None):
+        super().__init__(in_features=feature_dim, out_features=feature_dim, model_name=model_name,
+                         device=device, dtype=dtype)
+        self._num_heads = num_heads
+        self.expand_proj = nn.Linear(in_features=feature_dim, out_features=feature_dim * self._num_heads, bias=bias,
+                                     device=self.device, dtype=self.dtype)
+        self.ffn_heads = nn.ModuleList([FeedForward(feature_dim=feature_dim, num_layers=num_layers,
+                                                    expansion_factor=expansion_factor, bias=bias,
+                                                    dropout_rate=dropout_rate, device=self.device,
+                                                    dtype=self.dtype) for _ in range(self._num_heads)])
+        self.out_proj = nn.Linear(in_features=feature_dim * self._num_heads, out_features=feature_dim, bias=bias,
+                                  device=self.device, dtype=self.dtype)
+
+    @override
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ensure_device_and_dtype(x, device=self.device, dtype=self.dtype)
+        x = self.expand_proj(x)
+        x_heads = x.split(self.in_features, dim=-1)
+        head_outs = [self.ffn_heads[_](x_heads[_]) for _ in range(self._num_heads)]
+        return self.out_proj(torch.concat(head_outs, dim=-1))
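The new module widens the input to `num_heads` independent `feature_dim`-wide slices, runs a separate `FeedForward` per slice, then concatenates the head outputs and projects back down. A quick shape-contract sketch (batch size and width are illustrative):

```python
import torch
from deeplotx.nn.multi_head_feed_forward import MultiHeadFeedForward

mhff = MultiHeadFeedForward(feature_dim=128, num_heads=4, num_layers=2)
x = torch.randn(8, 128)
out = mhff(x)        # expand_proj -> (8, 512), split into 4 x (8, 128),
print(out.shape)     # per-head FFN, concat, out_proj -> torch.Size([8, 128])
```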
deeplotx/nn/recursive_sequential.py CHANGED
@@ -4,14 +4,14 @@ import torch
 from torch import nn

 from deeplotx.nn.base_neural_network import BaseNeuralNetwork
-from deeplotx.nn.feed_forward import FeedForward
+from deeplotx.nn.linear_regression import LinearRegression


 class RecursiveSequential(BaseNeuralNetwork):
     def __init__(self, input_dim: int, output_dim: int, bias: bool = True,
                  recursive_layers: int = 1, recursive_hidden_dim: int | None = None,
                  ffn_layers: int = 1, ffn_expansion_factor: int | float = 2, dropout_rate: float = 0.05,
-                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None):
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
         super().__init__(in_features=input_dim, out_features=output_dim, model_name=model_name,
                          device=device, dtype=dtype)
         if recursive_hidden_dim is None:
@@ -20,11 +20,10 @@ class RecursiveSequential(BaseNeuralNetwork):
                             num_layers=recursive_layers, batch_first=True,
                             bias=True, bidirectional=True, device=self.device,
                             dtype=self.dtype)
-        self.ffn = FeedForward(feature_dim=recursive_hidden_dim * 2, num_layers=ffn_layers,
-                               expansion_factor=ffn_expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                               device=self.device, dtype=self.dtype)
-        self.__proj = nn.Linear(in_features=recursive_hidden_dim * 2, out_features=output_dim, bias=bias,
-                                device=self.device, dtype=self.dtype)
+        self.out_proj = LinearRegression(input_dim=recursive_hidden_dim * 2, output_dim=output_dim,
+                                         num_heads=kwargs.get('ffn_heads', 1), head_layers=kwargs.get('ffn_head_layers', 1),
+                                         num_layers=ffn_layers, expansion_factor=ffn_expansion_factor,
+                                         bias=bias, dropout_rate=dropout_rate, device=self.device, dtype=self.dtype)

     def initial_state(self, batch_size: int = 1) -> tuple[torch.Tensor, torch.Tensor]:
         zeros = torch.zeros(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size, device=self.device, dtype=self.dtype)
@@ -37,9 +36,7 @@ class RecursiveSequential(BaseNeuralNetwork):
                  self.ensure_device_and_dtype(state[1], device=self.device, dtype=self.dtype))
         x, (hidden_state, cell_state) = self.lstm(x, state)
         x = x[:, -1, :]
-        residual = x
-        x = self.ffn(x) + residual
-        x = self.__proj(x)
+        x = self.out_proj(x)
        return x, (hidden_state, cell_state)

     @override
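With this change the FFN-plus-projection pair collapses into a single `LinearRegression` head, and the residual logic moves with it; `ffn_heads` / `ffn_head_layers` are plucked from `**kwargs`. A hedged construction sketch (values illustrative):

```python
from deeplotx.nn.recursive_sequential import RecursiveSequential

# The LSTM's last hidden state (recursive_hidden_dim * 2 wide) now feeds
# a LinearRegression head configured from the ffn_* kwargs.
model = RecursiveSequential(input_dim=128, output_dim=1,
                            recursive_layers=2, ffn_layers=2,
                            ffn_heads=2, ffn_head_layers=1)
```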
deeplotx/nn/roformer_encoder.py CHANGED
@@ -27,8 +27,8 @@ class RoFormerEncoder(BaseNeuralNetwork):
                              device=self.device, dtype=self.dtype)
         self.layer_norm = nn.LayerNorm(normalized_shape=feature_dim, eps=1e-9,
                                        device=self.device, dtype=self.dtype)
-        self.__proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
-                                bias=bias, device=self.device, dtype=self.dtype)
+        self.out_proj = nn.Linear(in_features=feature_dim * 2, out_features=feature_dim,
+                                  bias=bias, device=self.device, dtype=self.dtype)

     @override
     def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
@@ -37,4 +37,4 @@ class RoFormerEncoder(BaseNeuralNetwork):
             mask = self.ensure_device_and_dtype(mask, device=self.device, dtype=self.dtype)
         attn = self.attn(x=self.layer_norm(x), y=None, mask=mask)
         x = torch.concat([attn, x], dim=-1)
-        return self.__proj(self.ffn(x))
+        return self.out_proj(self.ffn(x))
deeplotx/nn/softmax_regression.py CHANGED
@@ -6,12 +6,12 @@ from deeplotx.nn.linear_regression import LinearRegression


 class SoftmaxRegression(LinearRegression):
-    def __init__(self, input_dim: int, output_dim: int, num_layers: int = 1, expansion_factor: int | float = 1.5,
-                 bias: bool = True, dropout_rate: float = 0.1, model_name: str | None = None,
-                 device: str | None = None, dtype: torch.dtype | None = None):
-        super().__init__(input_dim=input_dim, output_dim=output_dim, num_layers=num_layers,
+    def __init__(self, input_dim: int, output_dim: int, num_heads: int = 1, num_layers: int = 1,
+                 expansion_factor: int | float = 1.5, bias: bool = True, dropout_rate: float = 0.1,
+                 model_name: str | None = None, device: str | None = None, dtype: torch.dtype | None = None, **kwargs):
+        super().__init__(input_dim=input_dim, output_dim=output_dim, num_heads=num_heads, num_layers=num_layers,
                          expansion_factor=expansion_factor, bias=bias, dropout_rate=dropout_rate,
-                         model_name=model_name, device=device, dtype=dtype)
+                         model_name=model_name, device=device, dtype=dtype, **kwargs)

     @override
     def forward(self, x: torch.Tensor) -> torch.Tensor:
deeplotx/trainer/text_binary_classification_trainer.py CHANGED
@@ -49,6 +49,7 @@ class TextBinaryClassifierTrainer(BaseTrainer):
                 logger.warning("The dimension of features doesn't match. A new model instance will be created.")
                 self.model = None
         if self.model is None:
+            ffn_heads = kwargs.get('ffn_heads', 2)
            ffn_layers = kwargs.get('ffn_layers', 5)
            ffn_expansion_factor = kwargs.get('ffn_expansion_factor', 2)
            bias = kwargs.get('bias', True)
@@ -63,11 +64,11 @@ class TextBinaryClassifierTrainer(BaseTrainer):
            self.model = LongContextRecursiveSequential(input_dim=feature_dim, output_dim=1, bias=bias,
                                                        encoder_layers=encoder_layers, attn_heads=attn_heads,
                                                        recursive_layers=recursive_layers, recursive_hidden_dim=recursive_hidden_dim,
-                                                       ffn_layers=ffn_layers, ffn_expansion_factor=ffn_expansion_factor, dropout_rate=dropout_rate,
-                                                       encoder_ffn_layers=encoder_ffn_layers, encoder_expansion_factor=encoder_expansion_factor,
-                                                       encoder_dropout_rate=encoder_dropout_rate, attn_ffn_layers=attn_ffn_layers,
-                                                       attn_expansion_factor=attn_expansion_factor, attn_dropout_rate=attn_dropout_rate,
-                                                       theta=theta).initialize_weights()
+                                                       ffn_layers=ffn_layers, ffn_heads=ffn_heads, ffn_expansion_factor=ffn_expansion_factor,
+                                                       dropout_rate=dropout_rate, encoder_ffn_layers=encoder_ffn_layers,
+                                                       encoder_expansion_factor=encoder_expansion_factor, encoder_dropout_rate=encoder_dropout_rate,
+                                                       attn_ffn_layers=attn_ffn_layers, attn_expansion_factor=attn_expansion_factor,
+                                                       attn_dropout_rate=attn_dropout_rate, theta=theta).initialize_weights()
        logger.debug(f'Training Model: \n{self.model}')
        loss_function = nn.BCELoss()
        optimizer = optim.Adamax(self.model.parameters(), lr=learning_rate)
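On the trainer side, `ffn_heads` joins the kwargs-driven hyperparameters (default 2) and is forwarded into the model it builds. A hedged sketch of the equivalent direct construction (the feature dimension and the omitted encoder/attention kwargs are illustrative; defaults per this hunk):

```python
from deeplotx.nn.long_context_recursive_sequential import LongContextRecursiveSequential

# Roughly what the trainer builds when self.model is None.
model = LongContextRecursiveSequential(input_dim=768, output_dim=1,
                                       ffn_layers=5, ffn_heads=2,
                                       ffn_expansion_factor=2).initialize_weights()
```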
{deeplotx-0.8.3.dist-info → deeplotx-0.8.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deeplotx
-Version: 0.8.3
+Version: 0.8.6
 Summary: Easy-2-use long text NLP toolkit.
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
@@ -168,6 +168,7 @@ Dynamic: license-file
 ```python
 from deeplotx import (
     FeedForward,           # feed-forward network
+    MultiHeadFeedForward,  # multi-head feed-forward network
     LinearRegression,      # linear regression
     LogisticRegression,    # logistic regression / binary classification / multi-label classification
     SoftmaxRegression,     # softmax regression / multi-class classification
{deeplotx-0.8.3.dist-info → deeplotx-0.8.6.dist-info}/RECORD RENAMED
@@ -1,34 +1,35 @@
-deeplotx/__init__.py,sha256=oNeA-vNu5YGiEQg0IcpKEdGh_Y_2uPvo2nqaNL_Zgv8,1159
+deeplotx/__init__.py,sha256=xEq8WQ2LpEZoLX_Z464d0dy4aemFGrEV6ZMJr6ioFnQ,1186
 deeplotx/encoder/__init__.py,sha256=BrsF5_4O-4pfihYF2wjExDOoAY-03kGJTH-Mhez4tsE,129
-deeplotx/encoder/encoder.py,sha256=oSBdA-MiwMKNfTFJWR-RdvNS0G0qfX-Qchwy4LuwB00,3985
+deeplotx/encoder/encoder.py,sha256=tksTtmz9JRDSimCdhMkxpbGUHNWhARGaeKh2pBvLgEI,3988
 deeplotx/encoder/long_text_encoder.py,sha256=3ScdKDi65J5tdO8PFCXBjCzNUCLlJRwVhpDR0BrphG4,3951
 deeplotx/encoder/longformer_encoder.py,sha256=NNYLr5I9tdeh0C8Ir7QcbEMU9gDk6U7CiF3Tbg6NEsE,3372
-deeplotx/nn/__init__.py,sha256=01I_yqx9GTa4wy3uNyAqhtxp66tDqxgMLC4Ky5Vnkrg,651
+deeplotx/nn/__init__.py,sha256=YILwbxb-NHdiJjfOwBKH8F7PuZSDZSrGpTznPDucTro,710
 deeplotx/nn/attention.py,sha256=R-i-Rd7gnsh6hwXDeYfqLQOJvfSZIGfQbFzRlC91XLo,2879
-deeplotx/nn/auto_regression.py,sha256=uISx29t_zkDGS8s2wvGB6wOGYZitQ4hQ7wyoQl4lcqY,857
+deeplotx/nn/auto_regression.py,sha256=j_R7WGPq9REngjpLuX5c0AaNqOpgGm2Vfrolw-XjWXw,877
 deeplotx/nn/base_neural_network.py,sha256=FjQEDFH810fJS7JV3aLgJZnaMqC6DH--wlBvuj-ghTc,5900
 deeplotx/nn/feed_forward.py,sha256=4ozj7EDalO9pb6JUhZtsJqE0r8bIHFApHRt2zTrl4ho,2931
-deeplotx/nn/linear_regression.py,sha256=QybSRfsf9PpgJAWixvrSNn3OYRKJXpSZMfqdzpw-Kd8,1280
-deeplotx/nn/logistic_regression.py,sha256=WfgHVNGIvAYsX2iea2wRlLgfbubYWyZkBLYpnpwOiyU,937
+deeplotx/nn/linear_regression.py,sha256=LWrrdAIw32KIT1bdr7q6HczdpEiCgb-R8BCNXGywMxE,1763
+deeplotx/nn/logistic_regression.py,sha256=nipWD3ZPRub2Cx0rU2zxYQyG0COn3NJvew8b2gbJy24,998
 deeplotx/nn/long_context_auto_regression.py,sha256=uy0k_g8wEfMH5nd5HCfrHA8dgEsuWBA2x8U-g3h4vQc,1054
-deeplotx/nn/long_context_recursive_sequential.py,sha256=i7kUml9RV_mkLRJ114UHsj9Gxw7LzJVQ4z8-REHa8-w,2682
+deeplotx/nn/long_context_recursive_sequential.py,sha256=pcZfnrIHBqbp2BssfUTS1klpuykZwowikfAIaOnvRUI,2674
 deeplotx/nn/multi_head_attention.py,sha256=3z73uGbvy3jszRy1B9nxGOJjlttHpcpRF8Qd09OEams,2267
-deeplotx/nn/recursive_sequential.py,sha256=8Z8vT70xTygusL-3w3QlB_B_k0xQSUU2ZTgC1LhEmzQ,2805
-deeplotx/nn/roformer_encoder.py,sha256=UJjKniNdMd0rfoYQcsX6bPo6Ceq_Z6EhwHe2kgqWC_k,2426
+deeplotx/nn/multi_head_feed_forward.py,sha256=hD9ScrVJZ9kNksoFASf0xaPgEnNgCeRivW-XjYOPjj8,1908
+deeplotx/nn/recursive_sequential.py,sha256=Nrnsx-AU68tz1vn8_uf5ZdC-r8vA_X4-p-DY2t8y8us,2768
+deeplotx/nn/roformer_encoder.py,sha256=BAPAMS5-qiM3i2FUyIW-ZTc7og4gZzwlu5LniqzaymY,2432
 deeplotx/nn/rope.py,sha256=RTOjnllubktdy2rzFWxBfkuLuGjhEMyDd06uojdqPhM,1848
-deeplotx/nn/softmax_regression.py,sha256=PN_1Zr_B_z5zYC_s_8k6c5fllOtxfJEvVvCmC9GRmx0,958
+deeplotx/nn/softmax_regression.py,sha256=xe2etxSfN0e9XZ4E6Uyz5ThWWzAdQVjYIvN24j8kfNY,1019
 deeplotx/similarity/__init__.py,sha256=s3u-KSgxjnMcWpIItKgXNltFMPQ7YY3CqsqHI-5F1c8,724
 deeplotx/similarity/distribution.py,sha256=wQGouuuW531pZeBRKBujXsdsoz4fDnPw7_GW81jwepc,1066
 deeplotx/similarity/set.py,sha256=zhGFxtSIXlWqvipBYzoiPahp4g0boAIoUiMfG0wl07A,686
 deeplotx/similarity/vector.py,sha256=WVbDHqykt-fvuILVrhUCtIFAOEjY_zvttrXGM9eylG0,1125
 deeplotx/trainer/__init__.py,sha256=Fl5DR9UecQc5VtBcczU9sx_HtPNoFohpuELOh-Jrsks,77
 deeplotx/trainer/base_trainer.py,sha256=z0MeAT-rRYmjeBXt0ckt7J1itYArR0Cx02wHesXUoZE,385
-deeplotx/trainer/text_binary_classification_trainer.py,sha256=QMLR4cC8NCUP-v7SOYVtCykNwahENmWHv9adaeTbYmA,6528
+deeplotx/trainer/text_binary_classification_trainer.py,sha256=TFxOX8rWU_zKliI9zm7F5ZH7snR2d-sk95s3pfTmm78,6601
 deeplotx/util/__init__.py,sha256=5CH4MTeSgsmCe3LPMfvKoSBpwh6jDSBuHVElJvzQzgs,90
 deeplotx/util/hash.py,sha256=qbNU3RLBWGQYFVte9WZBAkZ1BkdjCXiKLDaKPN54KFk,662
 deeplotx/util/read_file.py,sha256=ptzouvEQeeW8KU5BrWNJlXw-vFXVrpS9SkAUxsu6A8A,612
-deeplotx-0.8.3.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
-deeplotx-0.8.3.dist-info/METADATA,sha256=Lif2B7wUDIQQKWvUt_Vl_XYPlMf_EhskiQcq8ZYv6TQ,13079
-deeplotx-0.8.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-deeplotx-0.8.3.dist-info/top_level.txt,sha256=hKg4pVDXZ-WWxkRfJFczRIll1Sv7VyfKCmzHLXbuh1U,9
-deeplotx-0.8.3.dist-info/RECORD,,
+deeplotx-0.8.6.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
+deeplotx-0.8.6.dist-info/METADATA,sha256=9cUvV_kD2TMFotnw51j1hXvGqjm8MBAfm7nJG62174I,13138
+deeplotx-0.8.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+deeplotx-0.8.6.dist-info/top_level.txt,sha256=hKg4pVDXZ-WWxkRfJFczRIll1Sv7VyfKCmzHLXbuh1U,9
+deeplotx-0.8.6.dist-info/RECORD,,