rxnn 0.2.27__tar.gz → 0.2.29__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. {rxnn-0.2.27 → rxnn-0.2.29}/PKG-INFO +1 -1
  2. {rxnn-0.2.27 → rxnn-0.2.29}/pyproject.toml +1 -1
  3. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/models.py +1 -2
  4. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/layers.py +2 -2
  5. {rxnn-0.2.27 → rxnn-0.2.29}/LICENSE +0 -0
  6. {rxnn-0.2.27 → rxnn-0.2.29}/README.md +0 -0
  7. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/.DS_Store +0 -0
  8. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/__init__.py +0 -0
  9. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/experimental/__init__.py +0 -0
  10. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/experimental/attention.py +0 -0
  11. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/experimental/models.py +0 -0
  12. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/experimental/moe.py +0 -0
  13. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/memory/__init__.py +0 -0
  14. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/memory/attention.py +0 -0
  15. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/memory/norm.py +0 -0
  16. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/memory/stm.py +0 -0
  17. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/rxt/__init__.py +0 -0
  18. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/rxt/models.py +0 -0
  19. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/__init__.py +0 -0
  20. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/base.py +0 -0
  21. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/bml.py +0 -0
  22. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/callbacks.py +0 -0
  23. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/dataset.py +0 -0
  24. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/mrl.py +0 -0
  25. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/reward.py +0 -0
  26. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/rl.py +0 -0
  27. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/scheduler.py +0 -0
  28. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/tokenizer.py +0 -0
  29. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/training/utils.py +0 -0
  30. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/__init__.py +0 -0
  31. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/attention.py +0 -0
  32. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/ff.py +0 -0
  33. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/mask.py +0 -0
  34. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/models.py +0 -0
  35. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/moe.py +0 -0
  36. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/positional.py +0 -0
  37. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/transformers/sampler.py +0 -0
  38. {rxnn-0.2.27 → rxnn-0.2.29}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.2.27
+Version: 0.2.29
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.2.27"
+version = "0.2.29"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
@@ -128,7 +128,7 @@ class MrlActorModel(nn.Module):
         return list(set(
             self.encoder.memory_parameters() +
             self.decoder.memory_parameters() +
-            self.memory_attention.parameters()
+            list(self.memory_attention.parameters())
         ))
 
     def memory_cross_attention_parameters(self) -> list[nn.Parameter]:
@@ -140,7 +140,6 @@ class MrlActorModel(nn.Module):
     def memory_attention_parameters(self) -> Iterator[nn.Parameter]:
         return self.memory_attention.parameters()
 
-
     def not_memory_parameters(self) -> list[nn.Parameter]:
         return list(set(
             self.encoder.not_memory_parameters() +
@@ -68,8 +68,8 @@ class ReactiveTransformerLayer(nn.Module):
         return list(self.memory_cross_attention.parameters()) + list(self.norm2.parameters())
 
     def not_memory_parameters(self) -> list[nn.Parameter]:
-        memory_params = self.memory_parameters()
-        return [param for param in self.parameters() if param not in memory_params]
+        return (list(self.attention.parameters()) + list(self.norm1.parameters()) +
+                list(self.norm3.parameters()) + list(self.ff.parameters()))
 
     def update_max_len(self, max_seq_len: int):
         if self.attention.rope is not None:
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes