rxnn-0.1.39.tar.gz → rxnn-0.1.41.tar.gz

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
Files changed (31)
  1. {rxnn-0.1.39 → rxnn-0.1.41}/PKG-INFO +1 -1
  2. {rxnn-0.1.39 → rxnn-0.1.41}/pyproject.toml +1 -1
  3. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/experimental/attention.py +30 -4
  4. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/attention.py +4 -0
  5. {rxnn-0.1.39 → rxnn-0.1.41}/LICENSE +0 -0
  6. {rxnn-0.1.39 → rxnn-0.1.41}/README.md +0 -0
  7. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/dataset.py +0 -0
  21. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/scheduler.py +0 -0
  22. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/training/tokenizer.py +0 -0
  23. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/__init__.py +0 -0
  24. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.39 → rxnn-0.1.41}/src/rxnn/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.39
+Version: 0.1.41
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.39"
+version = "0.1.41"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
src/rxnn/experimental/attention.py
@@ -90,11 +90,13 @@ class GroupedMoeAttention(GroupedQueryAttention):
 
         # Key/Value MoE routing
         B, S, D = key.shape
+        print('key/value type', key.dtype, value.dtype)
        key_flat = key.reshape(-1, D)
+        print('key flat type', key_flat.dtype)
         weights, indices = self.router(key_flat)  # (B*S, num_groups), (B*S, num_groups)
         weights = weights.view(B, S, self.num_groups, 1)
         indices = indices.view(B, S, self.num_groups)
-
+        print('weights/indices type', weights.dtype, indices.dtype)
         # Compute all experts' projections
         # Shape: (B*S, num_experts, head_dim)
         k_all = torch.einsum('bd,edh->beh', key_flat, self.wk)  # [B*S, num_experts, head_dim]
@@ -104,24 +106,44 @@ class GroupedMoeAttention(GroupedQueryAttention):
             k_all += self.bk
             v_all += self.bv
 
+        print('k all/v all before get all')
+        print(k_all.size(), k_all.dtype)
+        print(v_all.size(), v_all.dtype)
+
         # Get results for all heads
         k_all = k_all.view(B, S, self.num_experts, -1)  # [B, S, num_experts, head_dim]
         v_all = v_all.view(B, S, self.num_experts, -1)  # [B, S, num_experts, head_dim]
 
+        print('k all/v all get all')
+        print(k_all.size(), k_all.dtype)
+        print(v_all.size(), v_all.dtype)
+
         # Gather top-k experts using expanded indices
         expanded_indices = indices.unsqueeze(-1).expand(-1, -1, -1, k_all.size(-1))  # [B, S, num_groups, head_dim]
         selected_k = torch.gather(k_all, 2, expanded_indices)  # [B, S, num_groups, head_dim]
         selected_v = torch.gather(v_all, 2, expanded_indices)  # [B, S, num_groups, head_dim]
 
+        print('selected k/selected v')
+        print(selected_k.size(), selected_k.dtype)
+        print(selected_v.size(), selected_v.dtype)
+
         # Weighted
         weighted_k = selected_k * weights  # [B, S, num_groups, head_dim]
         weighted_v = selected_v * weights  # [B, S, num_groups, head_dim]
 
+        print('weighted')
+        print(weighted_k.size(), weighted_k.dtype)
+        print(weighted_v.size(), weighted_v.dtype)
+
         # Reshape to GQA format
         k = weighted_k.view(B, S, self.num_groups, -1).permute(0, 2, 1, 3)  # [B, num_groups, S, head_dim]
         v = weighted_v.view(B, S, self.num_groups, -1).permute(0, 2, 1, 3)  # [B, num_groups, S, head_dim]
 
-        if not self.rel_embed:
+        print('out 1')
+        print(k.size(), k.dtype)
+        print(v.size(), v.dtype)
+
+        if self.rel_embed:
             group_heads = self.num_heads // self.num_groups
 
             k = k.unsqueeze(2).expand(-1, -1, group_heads, -1, -1)  # (B, G, group_heads, S, head_dim)
@@ -130,6 +152,10 @@ class GroupedMoeAttention(GroupedQueryAttention):
             k = k.flatten(start_dim=1, end_dim=2)  # (B, H, S, head_dim)
             v = v.flatten(start_dim=1, end_dim=2)  # (B, H, S, head_dim)
 
+        print('out 2')
+        print(k.size(), k.dtype)
+        print(v.size(), v.dtype)
+
         return q, k, v
 
 
@@ -220,10 +246,10 @@ class DeepMoeAttention(GroupedMoeAttention):
         if self.use_bias:
             q_all += self.bq
 
-        q_all = q_all.view(B, T, self.num_query_experts, -1)
+        q_all = q_all.view(B, T, self.num_query_experts, -1)  # [B, T, num_query_experts, head_dim]
 
         # Gather top-k experts
-        expanded_indices = indices_q.unsqueeze(-1).expand(-1, -1, -1, q_all.size(-1))
+        expanded_indices = indices_q.unsqueeze(-1).expand(-1, -1, -1, q_all.size(-1))  # [B, T, num_query_groups, head_dim]
         selected_q = torch.gather(q_all, 2, expanded_indices)  # [B, T, num_query_groups, head_dim]
 
         # Weighted sum
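The shape comments added to DeepMoeAttention describe the same dim=2 gather, now on the query side. A small self-contained check, again not package code and with made-up sizes, confirms that each gathered slice is exactly the expert row named by `indices_q` for that token:

# Illustrative check of the dim=2 gather semantics used above.
import torch

B, T, num_query_experts, num_query_groups, head_dim = 1, 3, 4, 2, 8

q_all = torch.randn(B, T, num_query_experts, head_dim)              # all query experts' projections
indices_q = torch.randint(0, num_query_experts, (B, T, num_query_groups))

expanded_indices = indices_q.unsqueeze(-1).expand(-1, -1, -1, head_dim)  # [B, T, num_query_groups, head_dim]
selected_q = torch.gather(q_all, 2, expanded_indices)                     # [B, T, num_query_groups, head_dim]

# The slice for group 0 of token 0 equals the expert row chosen by indices_q.
assert torch.equal(selected_q[0, 0, 0], q_all[0, 0, indices_q[0, 0, 0]])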
src/rxnn/transformers/attention.py
@@ -137,6 +137,10 @@ class MultiHeadAttention(nn.Module):
         return self._calculate_output(attn_weights, v, b, t, d)
 
     def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor = None):
+        print('MHA forward')
+        print(query.size(), query.dtype)
+        print(key.size(), key.dtype)
+        print(value.size(), value.dtype)
         b, t, d = query.size()
         q, k, v = self._forward_qkv(query, key, value, b, t, d)
         if not self.rel_embed: