rxnn 0.1.52__tar.gz → 0.1.53__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.52 → rxnn-0.1.53}/PKG-INFO +1 -1
  2. {rxnn-0.1.52 → rxnn-0.1.53}/pyproject.toml +1 -1
  3. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/moe.py +6 -5
  4. {rxnn-0.1.52 → rxnn-0.1.53}/LICENSE +0 -0
  5. {rxnn-0.1.52 → rxnn-0.1.53}/README.md +0 -0
  6. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/dataset.py +0 -0
  21. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/scheduler.py +0 -0
  22. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/training/tokenizer.py +0 -0
  23. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/__init__.py +0 -0
  24. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/attention.py +0 -0
  25. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/ff.py +0 -0
  26. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/layers.py +0 -0
  27. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/mask.py +0 -0
  28. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/models.py +0 -0
  29. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.52 → rxnn-0.1.53}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rxnn
3
- Version: 0.1.52
3
+ Version: 0.1.53
4
4
  Summary: RxNN: Reactive Neural Networks Platform
5
5
  License: Apache-2.0
6
6
  Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
4
4
 
5
5
  [tool.poetry]
6
6
  name = "rxnn"
7
- version = "0.1.52"
7
+ version = "0.1.53"
8
8
  description = "RxNN: Reactive Neural Networks Platform"
9
9
 
10
10
  license = "Apache-2.0"
@@ -23,20 +23,21 @@ class MoeRouter(nn.Module):
23
23
 
24
24
  def calculate_aux_loss(self, top_k_indices: torch.Tensor, probs: torch.Tensor) -> torch.Tensor:
25
25
  # Get shapes
26
- B, S, K = top_k_indices.shape # Batch, Sequence length, Top-K
26
+ T, K = top_k_indices.shape # Flattened tokens (batch * seq len), Top-K
27
27
 
28
28
  # 1. Compute expert selection mask (one-hot encoded)
29
29
  expert_mask = F.one_hot(top_k_indices, self.num_experts).float() # (B, S, K, E)
30
30
 
31
31
  # 2. Total number of times each expert is selected
32
- expert_usage = expert_mask.sum(dim=(0, 1, 2)) # (E,)
32
+ expert_usage = expert_mask.sum(dim=(0, 1)) # (E,)
33
33
 
34
34
  # 3. Fraction of tokens assigned to each expert
35
- total_tokens = B * S * K
36
- fraction_expert = expert_usage / total_tokens # (E,)
35
+ total_selections = T * K
36
+ fraction_expert = expert_usage / (total_selections + 1e-6) # (E,)
37
37
 
38
38
  # 4. Sum of probabilities for each expert's selected tokens
39
- sum_probs = (probs.unsqueeze(-1) * expert_mask).sum(dim=(0, 1, 2)) # (E,)
39
+ probs_expanded = probs.unsqueeze(1).expand(-1, K, -1) # (T, K, E)
40
+ sum_probs = (probs_expanded * expert_mask).sum(dim=(0, 1))
40
41
 
41
42
  # 5. Average probability per expert (avoid division by zero)
42
43
  avg_probs = sum_probs / expert_usage.clamp(min=1e-6) # (E,)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes