rxnn 0.1.61__tar.gz → 0.1.62__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.61 → rxnn-0.1.62}/PKG-INFO +1 -2
  2. {rxnn-0.1.61 → rxnn-0.1.62}/pyproject.toml +2 -2
  3. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/attention.py +0 -3
  4. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/layers.py +0 -3
  5. {rxnn-0.1.61 → rxnn-0.1.62}/LICENSE +0 -0
  6. {rxnn-0.1.61 → rxnn-0.1.62}/README.md +0 -0
  7. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/memory/norm.py +0 -0
  14. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/memory/stm.py +0 -0
  15. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/rxt/__init__.py +0 -0
  16. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/rxt/models.py +0 -0
  17. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/__init__.py +0 -0
  18. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/base.py +0 -0
  19. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/bml.py +0 -0
  20. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/callbacks.py +0 -0
  21. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/dataset.py +0 -0
  22. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/scheduler.py +0 -0
  23. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/training/tokenizer.py +0 -0
  24. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/__init__.py +0 -0
  25. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/ff.py +0 -0
  26. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/utils.py +0 -0
{rxnn-0.1.61 → rxnn-0.1.62}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.61
+Version: 0.1.62
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -14,7 +14,6 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: datasets (>=3.5.0,<4.0.0)
-Requires-Dist: flash-attention (>=1.0.0,<2.0.0)
 Requires-Dist: huggingface-hub (>=0.30.0,<0.31.0)
 Requires-Dist: tensorboard (>=2.19.0,<3.0.0)
 Requires-Dist: tokenizers (>=0.21.0,<0.22.0)
{rxnn-0.1.61 → rxnn-0.1.62}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.61"
+version = "0.1.62"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
@@ -30,4 +30,4 @@ datasets = "^3.5.0"
 tokenizers = "^0.21.0"
 huggingface-hub = "^0.30.0"
 tensorboard = "^2.19.0"
-flash-attention = "^1.0.0"
+
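Apart from the version bump, the only packaging change is dropping the flash-attention requirement. A quick way to confirm which build and which declared dependencies you actually have installed is to read the distribution metadata; this is a minimal sketch using only the standard-library importlib.metadata API, not part of the package itself:

    # Inspect the installed rxnn distribution (illustrative check, stdlib only).
    from importlib.metadata import version, requires

    print(version("rxnn"))             # expected: 0.1.62 after upgrading
    for req in requires("rxnn") or []:
        print(req)                     # flash-attention should no longer be listed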
{rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/attention.py

@@ -137,9 +137,6 @@ class MultiHeadAttention(nn.Module):
         b, t, d = query.size()
         q, k, v = self._forward_qkv(query, key, value, b, t, d)
         if not self.rel_embed:
-            print('q', q.size())
-            print('k', k.size())
-            print('v', v.size())
             q, k = self._apply_rope(q, k)
             attn_output = self._calculate_attention(q, k, v, b, t, d, mask=mask)
         else:
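For reference, after the debug prints are stripped the RoPE branch of MultiHeadAttention.forward reduces to the following. This is a sketch assembled from the context lines of the hunk above, not the full method:

    # Non-relative-embedding (RoPE) branch of MultiHeadAttention.forward (sketch)
    b, t, d = query.size()
    q, k, v = self._forward_qkv(query, key, value, b, t, d)
    if not self.rel_embed:
        # rotary positional embedding path, with no per-step debug output
        q, k = self._apply_rope(q, k)
        attn_output = self._calculate_attention(q, k, v, b, t, d, mask=mask)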
{rxnn-0.1.61 → rxnn-0.1.62}/src/rxnn/transformers/layers.py

@@ -86,7 +86,6 @@ class ReactiveTransformerLayer(nn.Module):
         residual = x
         if not self.use_post_norm:
             x = self.norm1(x)
-        print('self x', x.size())
         x = self.attention(x, x, x, mask=mask)
         x = residual + x
         if self.use_post_norm:
@@ -95,8 +94,6 @@
         residual = x
         if not self.use_post_norm:
             x = self.norm2(x)
-        print('x', x.size())
-        print('STM', stm.size())
         x = self.memory_cross_attention(x, stm, stm)
         x = residual + x
         if self.use_post_norm:
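Taken together, the two hunks leave ReactiveTransformerLayer.forward as a plain pair of residual sub-blocks: self-attention followed by memory cross-attention against the short-term memory tensor stm. The sketch below is assembled from the context lines of the hunks; the post-norm branches are omitted:

    # ReactiveTransformerLayer.forward, pre-norm path only (sketch)
    residual = x
    if not self.use_post_norm:
        x = self.norm1(x)
    x = self.attention(x, x, x, mask=mask)        # self-attention sub-block
    x = residual + x

    residual = x
    if not self.use_post_norm:
        x = self.norm2(x)
    x = self.memory_cross_attention(x, stm, stm)  # attend over short-term memory
    x = residual + x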