rxnn 0.1.58__tar.gz → 0.1.59__tar.gz

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registry.
Files changed (31)
  1. {rxnn-0.1.58 → rxnn-0.1.59}/PKG-INFO +1 -1
  2. {rxnn-0.1.58 → rxnn-0.1.59}/pyproject.toml +1 -1
  3. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/experimental/models.py +9 -5
  4. {rxnn-0.1.58 → rxnn-0.1.59}/LICENSE +0 -0
  5. {rxnn-0.1.58 → rxnn-0.1.59}/README.md +0 -0
  6. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/experimental/moe.py +0 -0
  10. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/memory/__init__.py +0 -0
  11. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/memory/norm.py +0 -0
  12. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/memory/stm.py +0 -0
  13. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/rxt/__init__.py +0 -0
  14. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/rxt/models.py +0 -0
  15. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/__init__.py +0 -0
  16. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/base.py +0 -0
  17. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/bml.py +0 -0
  18. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/callbacks.py +0 -0
  19. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/dataset.py +0 -0
  20. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.58 → rxnn-0.1.59}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rxnn
- Version: 0.1.58
+ Version: 0.1.59
  Summary: RxNN: Reactive Neural Networks Platform
  License: Apache-2.0
  Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

  [tool.poetry]
  name = "rxnn"
- version = "0.1.58"
+ version = "0.1.59"
  description = "RxNN: Reactive Neural Networks Platform"

  license = "Apache-2.0"
@@ -11,7 +11,7 @@ from ..utils import get_model_size
  from .attention import init_experimental_attention


- class MoeAttentionTransformerConfig(TypedDict):
+ class ExperimentalAttentionTransformerConfig(TypedDict):
      num_layers: int
      vocab_size: int
      embed_dim: int
@@ -34,8 +34,12 @@ class MoeAttentionTransformerConfig(TypedDict):
      att_num_query_groups: int


- class MoeAttentionTransformer(nn.Module, PyTorchModelHubMixin, pipeline_tag="text-generation", license="apache-2.0"):
-     """Research decoder model for experiments with Mixture-of-Experts Attention"""
+ class ExperimentalAttentionTransformer(nn.Module, PyTorchModelHubMixin, pipeline_tag="text-generation", license="apache-2.0"):
+     """
+     Research model for experiments with new attention layers.
+
+     Currently, accepts SparseQueryAttention, GroupedMoeAttention, DeepMoeAttention and standard variants (MHA/GQA/MQA) for reference models
+     """

      def __init__(
              self,
@@ -61,7 +65,7 @@ class MoeAttentionTransformer(nn.Module, PyTorchModelHubMixin, pipeline_tag="tex
              att_num_query_groups: int = None,
              **kwargs
      ):
-         super(MoeAttentionTransformer, self).__init__(**kwargs)
+         super(ExperimentalAttentionTransformer, self).__init__(**kwargs)
          assert ff_activation in ['relu', 'gelu',
                                   'swish', 'silu', 'linear',
                                   'sigmoid'], 'Feed-forward activation could be "relu", "gelu", "swish", "silu", "linear", "sigmoid".'
@@ -83,7 +87,7 @@ class MoeAttentionTransformer(nn.Module, PyTorchModelHubMixin, pipeline_tag="tex
              num_query_experts=att_num_query_experts,
              num_query_groups=att_num_query_groups)

-         use_moe_att = att_type in ['gma', 'dma', 'gma_s', 'dma_s']
+         use_moe_att = att_type in ['gma', 'dma']

          self.model = ClassicTransformerDecoder(
              embed_dim,
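
The only functional change in this release is in src/rxnn/experimental/models.py: the research model and its config TypedDict are renamed from MoeAttentionTransformer / MoeAttentionTransformerConfig to ExperimentalAttentionTransformer / ExperimentalAttentionTransformerConfig, and only att_type values 'gma' and 'dma' now enable the MoE attention path ('gma_s' and 'dma_s' no longer do). Below is a minimal migration sketch for downstream code, assuming only what the diff shows (the class names, the module path, and the PyTorchModelHubMixin base); the repository id is a hypothetical placeholder, not a real checkpoint.

# Before 0.1.59:
# from rxnn.experimental.models import MoeAttentionTransformer as ResearchModel

# From 0.1.59 on:
from rxnn.experimental.models import ExperimentalAttentionTransformer as ResearchModel

# PyTorchModelHubMixin provides from_pretrained()/save_pretrained(), so
# checkpoints can be loaded from the Hugging Face Hub under the new class name.
model = ResearchModel.from_pretrained("ReactiveAI/example-checkpoint")  # placeholder repo id

The constructor signature is untouched by the rename (its lines appear only as context in the diff), so existing keyword arguments such as ff_activation, att_type, att_num_query_experts and att_num_query_groups carry over as-is.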