rxnn 0.2.59.tar.gz → 0.2.60.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {rxnn-0.2.59 → rxnn-0.2.60}/PKG-INFO +1 -1
  2. {rxnn-0.2.59 → rxnn-0.2.60}/pyproject.toml +1 -1
  3. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/memory/attention.py +0 -1
  4. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/models.py +0 -1
  5. {rxnn-0.2.59 → rxnn-0.2.60}/LICENSE +0 -0
  6. {rxnn-0.2.59 → rxnn-0.2.60}/README.md +0 -0
  7. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/.DS_Store +0 -0
  8. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/__init__.py +0 -0
  9. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/experimental/__init__.py +0 -0
  10. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/experimental/attention.py +0 -0
  11. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/experimental/models.py +0 -0
  12. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/experimental/moe.py +0 -0
  13. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/memory/__init__.py +0 -0
  14. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/callbacks.py +0 -0
  22. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/dataset.py +0 -0
  23. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/ddp.py +0 -0
  24. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/models.py +0 -0
  25. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/mrl.py +0 -0
  26. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/reward.py +0 -0
  27. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/rl.py +0 -0
  28. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/scheduler.py +0 -0
  29. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/tokenizer.py +0 -0
  30. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/training/utils.py +0 -0
  31. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/__init__.py +0 -0
  32. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/attention.py +0 -0
  33. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/ff.py +0 -0
  34. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/layers.py +0 -0
  35. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/mask.py +0 -0
  36. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/moe.py +0 -0
  37. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/positional.py +0 -0
  38. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/transformers/sampler.py +0 -0
  39. {rxnn-0.2.59 → rxnn-0.2.60}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rxnn
3
- Version: 0.2.59
3
+ Version: 0.2.60
4
4
  Summary: RxNN: Reactive Neural Networks Platform
5
5
  License: Apache-2.0
6
6
  Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
4
4
 
5
5
  [tool.poetry]
6
6
  name = "rxnn"
7
- version = "0.2.59"
7
+ version = "0.2.60"
8
8
  description = "RxNN: Reactive Neural Networks Platform"
9
9
 
10
10
  license = "Apache-2.0"
@@ -49,7 +49,6 @@ class StmMemoryAttention(nn.Module):
49
49
 
50
50
  def forward(self, x: torch.Tensor, attention_mask: torch.Tensor = None) -> torch.Tensor:
51
51
  if attention_mask is not None:
52
- print(attention_mask.size())
53
52
  attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).bool()
54
53
  new_stm = torch.zeros_like(self.stm.memory)
55
54
  for i in range(self.num_layers):
@@ -108,7 +108,6 @@ class ReactiveTransformerEncoder(ReactiveTransformerBase):
108
108
  def forward(self, x: torch.Tensor, attention_mask: torch.Tensor = None) -> tuple[torch.Tensor, torch.Tensor]:
109
109
  x = super().forward(x) # apply embeddings
110
110
  if attention_mask is not None:
111
- print(attention_mask.size())
112
111
  attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).bool()
113
112
 
114
113
  hidden_states = []
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes