rxnn 0.1.40__tar.gz → 0.1.42__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.40 → rxnn-0.1.42}/PKG-INFO +1 -1
  2. {rxnn-0.1.40 → rxnn-0.1.42}/pyproject.toml +1 -1
  3. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/experimental/attention.py +4 -3
  4. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/attention.py +4 -0
  5. {rxnn-0.1.40 → rxnn-0.1.42}/LICENSE +0 -0
  6. {rxnn-0.1.40 → rxnn-0.1.42}/README.md +0 -0
  7. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/dataset.py +0 -0
  21. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/scheduler.py +0 -0
  22. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/training/tokenizer.py +0 -0
  23. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/__init__.py +0 -0
  24. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.40 → rxnn-0.1.42}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.40
+Version: 0.1.42
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.40"
+version = "0.1.42"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
@@ -90,6 +90,7 @@ class GroupedMoeAttention(GroupedQueryAttention):
 
         # Key/Value MoE routing
        B, S, D = key.shape
+        print('key/value type', key.dtype, value.dtype)
         key_flat = key.reshape(-1, D)
         print('key flat type', key_flat.dtype)
         weights, indices = self.router(key_flat) # (B*S, num_groups), (B*S, num_groups)
@@ -127,8 +128,8 @@ class GroupedMoeAttention(GroupedQueryAttention):
         print(selected_v.size(), selected_v.dtype)
 
         # Weighted
-        weighted_k = selected_k * weights # [B, S, num_groups, head_dim]
-        weighted_v = selected_v * weights # [B, S, num_groups, head_dim]
+        weighted_k = (selected_k * weights).to(selected_k.device, dtype=selected_k.dtype) # [B, S, num_groups, head_dim]
+        weighted_v = (selected_v * weights).to(selected_k.device, dtype=selected_k.dtype) # [B, S, num_groups, head_dim]
 
         print('weighted')
         print(weighted_k.size(), weighted_k.dtype)
@@ -252,7 +253,7 @@ class DeepMoeAttention(GroupedMoeAttention):
         selected_q = torch.gather(q_all, 2, expanded_indices) # [B, T, num_query_groups, head_dim]
 
         # Weighted sum
-        q = selected_q * weights_q # [B, T, num_query_groups, head_dim]
+        q = (selected_q * weights_q).to(selected_q.device, dtype=selected_q.dtype) # [B, T, num_query_groups, head_dim]
         q = q.view(B, T, self.num_query_groups, -1).permute(0, 2, 1, 3) # [B, num_query_groups, T, head_dim]
 
         return super()._forward_qkv(q, key, value, b, t, d, skip_query_processing=True)
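The recurring change in this file replaces `selected * weights` with `(selected * weights).to(...)`: under mixed precision the router weights (typically the output of a float32 softmax) can promote the half-precision gathered keys, values, and queries to float32, and the explicit cast pins the product back to the source tensor's device and dtype. The snippet below is a minimal, self-contained sketch of that promotion behaviour; the shapes and the `weights` tensor are illustrative and not taken from the package.

    import torch

    # Illustrative sketch only: half-precision gathered values multiplied by
    # float32 router weights are promoted to float32 by PyTorch's type
    # promotion rules; casting back (as in the 0.1.42 change) keeps the
    # original dtype.
    selected_k = torch.randn(2, 8, 4, 16, dtype=torch.float16)  # [B, S, num_groups, head_dim]
    weights = torch.softmax(torch.randn(2, 8, 4, 1), dim=2)     # router weights, float32

    promoted = selected_k * weights
    pinned = (selected_k * weights).to(selected_k.device, dtype=selected_k.dtype)

    print(promoted.dtype)  # torch.float32
    print(pinned.dtype)    # torch.float16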
@@ -137,6 +137,10 @@ class MultiHeadAttention(nn.Module):
         return self._calculate_output(attn_weights, v, b, t, d)
 
     def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor = None):
+        print('MHA forward')
+        print(query.size(), query.dtype)
+        print(key.size(), key.dtype)
+        print(value.size(), value.dtype)
         b, t, d = query.size()
         q, k, v = self._forward_qkv(query, key, value, b, t, d)
         if not self.rel_embed:
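This second hunk only instruments `MultiHeadAttention.forward` with shape/dtype prints, presumably to trace the same mixed-precision issue. As a hypothetical, non-invasive alternative (not part of the package), the same information can be captured with a forward pre-hook, so the prints do not have to live in the module code; `torch.nn.MultiheadAttention` stands in below for the package's own attention module.

    import torch
    import torch.nn as nn

    # Hypothetical alternative to the inline prints: a forward pre-hook logs
    # input shapes and dtypes for any module without editing its forward().
    def log_inputs(module, args):
        for i, t in enumerate(args):
            if isinstance(t, torch.Tensor):
                print(f'{module.__class__.__name__} input {i}: {tuple(t.shape)} {t.dtype}')

    mha = nn.MultiheadAttention(embed_dim=64, num_heads=4)  # placeholder module
    handle = mha.register_forward_pre_hook(log_inputs)
    x = torch.randn(8, 2, 64)  # (seq_len, batch, embed_dim)
    mha(x, x, x)
    handle.remove()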