rxnn 0.1.62__tar.gz → 0.1.64__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.62 → rxnn-0.1.64}/PKG-INFO +1 -1
  2. {rxnn-0.1.62 → rxnn-0.1.64}/pyproject.toml +1 -1
  3. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/rxt/models.py +17 -13
  4. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/base.py +6 -2
  5. {rxnn-0.1.62 → rxnn-0.1.64}/LICENSE +0 -0
  6. {rxnn-0.1.62 → rxnn-0.1.64}/README.md +0 -0
  7. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/memory/norm.py +0 -0
  14. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/memory/stm.py +0 -0
  15. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/rxt/__init__.py +0 -0
  16. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/bml.py +0 -0
  18. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/callbacks.py +0 -0
  19. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/dataset.py +0 -0
  20. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.62 → rxnn-0.1.64}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.62
+Version: 0.1.64
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.62"
+version = "0.1.64"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
@@ -31,9 +31,11 @@ class RxTAlphaComponentConfig(TypedDict):
     moe_top_k: int
     self_att_type: str
     cross_att_type: str
-    att_num_experts: int
-    att_num_query_experts: int
-    att_num_query_groups: int
+    att_experts: int
+    att_query_experts: int
+    att_query_groups: int
+    cross_att_groups: int
+    cross_att_query_groups: int
 
 
 class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
@@ -61,9 +63,11 @@ class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
             moe_top_k: int = 1,
             self_att_type: str = 'gqa',
             cross_att_type: str = 'mqa',
-            att_num_experts: int = None,
-            att_num_query_experts: int = None,
-            att_num_query_groups: int = None,
+            att_experts: int = None,
+            att_query_experts: int = None,
+            att_query_groups: int = None,
+            cross_att_groups: int = None,
+            cross_att_query_groups: int = None,
             **kwargs
     ):
         super(RxTAlphaComponentBase, self).__init__(**kwargs)
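
For callers of this constructor, the rename drops the `num_` prefix from the attention keyword arguments. A minimal migration sketch, assuming keyword-argument call sites; the helper below is hypothetical and not part of the package:

# Hypothetical helper: maps the 0.1.62 kwarg names to their 0.1.64 equivalents.
_RENAMED_KWARGS = {
    'att_num_experts': 'att_experts',
    'att_num_query_experts': 'att_query_experts',
    'att_num_query_groups': 'att_query_groups',
}

def migrate_component_kwargs(old_kwargs: dict) -> dict:
    # Return a copy of old_kwargs with the renamed attention keys updated;
    # any other keys pass through unchanged.
    return {_RENAMED_KWARGS.get(key, key): value for key, value in old_kwargs.items()}

# Example: translating an old-style call site's kwargs to the new names.
migrate_component_kwargs({'att_num_experts': 8, 'att_num_query_groups': 2, 'moe_top_k': 1})
# -> {'att_experts': 8, 'att_query_groups': 2, 'moe_top_k': 1}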
@@ -86,20 +90,20 @@ class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
         else:
             att_init = lambda: init_experimental_attention(embed_dim, att_heads, self_att_type, att_groups, rope=rope,
                                                            use_flash_attention=use_flash_attention, dropout=att_dropout,
-                                                           max_seq_len=seq_len, is_causal=is_causal, num_experts=att_num_experts,
-                                                           num_query_experts=att_num_query_experts,
-                                                           num_query_groups=att_num_query_groups)
+                                                           max_seq_len=seq_len, is_causal=is_causal, num_experts=att_experts,
+                                                           num_query_experts=att_query_experts,
+                                                           num_query_groups=att_query_groups)
 
         if cross_att_type in ['mha', 'gqa', 'mqa']:
             cross_att_init = lambda: init_attention(embed_dim, att_heads, cross_att_type, att_groups, rope=rope,
                                                     use_flash_attention=use_flash_attention, dropout=att_dropout,
                                                     max_seq_len=seq_len, is_causal=is_causal)
         else:
-            cross_att_init = lambda: init_experimental_attention(embed_dim, att_heads, cross_att_type, att_groups, rope=rope,
+            cross_att_init = lambda: init_experimental_attention(embed_dim, att_heads, cross_att_type, cross_att_groups or att_groups, rope=rope,
                                                                  use_flash_attention=use_flash_attention, dropout=att_dropout,
-                                                                 max_seq_len=seq_len, is_causal=is_causal, num_experts=att_num_experts,
-                                                                 num_query_experts=att_num_query_experts,
-                                                                 num_query_groups=att_num_query_groups)
+                                                                 max_seq_len=seq_len, is_causal=is_causal, num_experts=att_experts,
+                                                                 num_query_experts=att_query_experts,
+                                                                 num_query_groups=cross_att_query_groups or att_query_groups)
 
         layers = nn.ModuleList([
             ReactiveTransformerLayer(
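
The two new cross-attention arguments fall back to the shared self-attention values via `or`, so leaving them at their `None` defaults keeps the previous behaviour (note that `or` tests truthiness, so an explicit `0` would also trigger the fallback). A standalone sketch of that selection logic, with argument names mirroring the diff but the function itself being illustrative:

def resolve_cross_att_groups(att_groups: int,
                             att_query_groups: int,
                             cross_att_groups: int = None,
                             cross_att_query_groups: int = None) -> tuple:
    # Mirrors the `x or y` fallback in the diff: cross-attention reuses the
    # self-attention settings unless truthy overrides are provided.
    return (cross_att_groups or att_groups,
            cross_att_query_groups or att_query_groups)

# No overrides: cross-attention inherits the self-attention group counts.
assert resolve_cross_att_groups(4, 2) == (4, 2)
# Explicit overrides win.
assert resolve_cross_att_groups(4, 2, cross_att_groups=8, cross_att_query_groups=4) == (8, 4)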
@@ -113,7 +113,9 @@ class BaseTrainer(ABC):
 
         self.model.train()
         for epoch in range(self.current_epoch, self.current_epoch + epochs):
-            if self.is_running:
+            if not self.is_running:
+                break
+            else:
                 self.current_epoch = epoch
                 self.epoch_steps = 0
                 if train_sampler is not None:
@@ -143,7 +145,9 @@ class BaseTrainer(ABC):
             self.optimizer_step_count = 0
 
             for batch_idx, batch in enumerate(dataloader):
-                if self.is_running:
+                if not self.is_running:
+                    break
+                else:
                     for callback in self.callbacks:
                         callback.on_batch_start(self.model, batch_idx, batch)
                     if self.get_batch_size(batch) == batch_size:
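
In both training loops the change replaces a guard that only skipped the loop body with an early `break`, so clearing `is_running` (for example from a callback) now exits the epoch and batch loops immediately instead of idling through every remaining iteration. A simplified standalone illustration of the difference; the flag and loop below are stand-ins, not the trainer's actual code:

is_running = True
processed = []

for epoch in range(5):
    if epoch == 2:
        is_running = False  # simulate a stop request arriving mid-training

    # Old pattern: `if is_running:` skipped the body but kept iterating.
    # New pattern: leave the loop as soon as the flag is cleared.
    if not is_running:
        break
    else:
        processed.append(epoch)

print(processed)  # [0, 1] -- iteration stops right after the flag flips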