rxnn 0.1.59__tar.gz → 0.1.61__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.59 → rxnn-0.1.61}/PKG-INFO +1 -1
  2. {rxnn-0.1.59 → rxnn-0.1.61}/pyproject.toml +1 -1
  3. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/attention.py +3 -0
  4. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/layers.py +3 -0
  5. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/models.py +4 -4
  6. {rxnn-0.1.59 → rxnn-0.1.61}/LICENSE +0 -0
  7. {rxnn-0.1.59 → rxnn-0.1.61}/README.md +0 -0
  8. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/__init__.py +0 -0
  9. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/experimental/__init__.py +0 -0
  10. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/experimental/attention.py +0 -0
  11. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/experimental/models.py +0 -0
  12. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/experimental/moe.py +0 -0
  13. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/memory/__init__.py +0 -0
  14. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/callbacks.py +0 -0
  22. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/dataset.py +0 -0
  23. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/scheduler.py +0 -0
  24. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/training/tokenizer.py +0 -0
  25. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/__init__.py +0 -0
  26. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/ff.py +0 -0
  27. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/mask.py +0 -0
  28. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.59 → rxnn-0.1.61}/src/rxnn/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.59
+Version: 0.1.61
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "rxnn"
-version = "0.1.59"
+version = "0.1.61"
 description = "RxNN: Reactive Neural Networks Platform"

 license = "Apache-2.0"

src/rxnn/transformers/attention.py
@@ -137,6 +137,9 @@ class MultiHeadAttention(nn.Module):
         b, t, d = query.size()
         q, k, v = self._forward_qkv(query, key, value, b, t, d)
         if not self.rel_embed:
+            print('q', q.size())
+            print('k', k.size())
+            print('v', v.size())
             q, k = self._apply_rope(q, k)
             attn_output = self._calculate_attention(q, k, v, b, t, d, mask=mask)
         else:

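For reference, a minimal sketch of the shapes such prints would report, assuming _forward_qkv splits the projections into the conventional [batch, heads, seq, head_dim] layout (the exact layout in rxnn may differ; split_heads and all sizes below are illustrative, not taken from the package):

import torch

b, t, d, heads = 2, 10, 64, 8
head_dim = d // heads

# Hypothetical head split standing in for what _forward_qkv is assumed to produce.
def split_heads(x: torch.Tensor) -> torch.Tensor:
    return x.view(b, t, heads, head_dim).transpose(1, 2)

query = key = value = torch.randn(b, t, d)
q, k, v = (split_heads(x) for x in (query, key, value))

print('q', q.size())  # torch.Size([2, 8, 10, 8])
print('k', k.size())
print('v', v.size())
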
src/rxnn/transformers/layers.py
@@ -86,6 +86,7 @@ class ReactiveTransformerLayer(nn.Module):
         residual = x
         if not self.use_post_norm:
             x = self.norm1(x)
+        print('self x', x.size())
         x = self.attention(x, x, x, mask=mask)
         x = residual + x
         if self.use_post_norm:

@@ -94,6 +94,8 @@ class ReactiveTransformerLayer(nn.Module):
         residual = x
         if not self.use_post_norm:
             x = self.norm2(x)
+        print('x', x.size())
+        print('STM', stm.size())
         x = self.memory_cross_attention(x, stm, stm)
         x = residual + x
         if self.use_post_norm:

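The prints here inspect the two operands of memory cross-attention: the hidden state x supplies the queries, and the short-term memory stm supplies keys and values, so both need a matching batch dimension. A small sketch of that relationship, using torch.nn.MultiheadAttention as a stand-in for rxnn's memory cross-attention (shapes are illustrative assumptions, not taken from the package):

import torch
import torch.nn as nn

batch, seq_len, slots, dim = 2, 16, 8, 64

x = torch.randn(batch, seq_len, dim)   # layer input: queries
stm = torch.randn(batch, slots, dim)   # short-term memory: keys/values

# Stand-in cross-attention; queries come from x, keys/values from stm.
cross_attn = nn.MultiheadAttention(dim, num_heads=8, batch_first=True)
out, _ = cross_attn(x, stm, stm)
print(out.shape)  # torch.Size([2, 16, 64]) - same shape as x, ready for the residual add
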
src/rxnn/transformers/models.py
@@ -72,11 +72,11 @@ class ReactiveTransformerDecoder(ReactiveTransformerBase):
         # Process shared layers
         if self.shared_layers is not None:
             for i in range(self.num_shared_layers):
-                layer_stm = self.stm(i)
+                layer_stm = self.stm(i).expand(x.size(0), -1, -1)
                 x = self.shared_layers[i](x, layer_stm, mask=mask)
         # Process own layers
         for i in range(self.num_own_layers):
-            layer_stm = self.stm(i)
+            layer_stm = self.stm(i).expand(x.size(0), -1, -1)
             x = self.layers[i](x, layer_stm, mask=mask)
         return self.head(x)

@@ -93,12 +93,12 @@ class ReactiveTransformerEncoder(ReactiveTransformerBase):
         # Process shared layers
         if self.shared_layers is not None:
             for i in range(self.num_shared_layers):
-                layer_stm = self.stm(i)
+                layer_stm = self.stm(i).expand(x.size(0), -1, -1)
                 x = self.shared_layers[i](x, layer_stm, mask=attention_mask)
                 hidden_states.append(x)
         # Process own layers
         for i in range(self.num_own_layers):
-            layer_stm = self.stm(i)
+            layer_stm = self.stm(i).expand(x.size(0), -1, -1)
             x = self.layers[i](x, layer_stm, mask=attention_mask)
             hidden_states.append(x)
         return x, torch.stack(hidden_states)
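The functional change in models.py is the added .expand(x.size(0), -1, -1), which broadcasts each layer's short-term memory (STM) tensor across the batch dimension before it is passed to the layer's memory cross-attention. A minimal sketch of that broadcasting, assuming a hypothetical per-layer STM shape of [1, slots, dim] (the actual shapes in rxnn.memory.stm may differ):

import torch

batch_size, seq_len, slots, dim = 4, 16, 8, 32

x = torch.randn(batch_size, seq_len, dim)  # decoder/encoder hidden states
layer_stm = torch.randn(1, slots, dim)     # hypothetical per-layer STM with a single batch slot

# expand() returns a batch-sized view without copying memory;
# every batch element reads the same underlying STM slots.
expanded = layer_stm.expand(x.size(0), -1, -1)
print(expanded.shape)                                # torch.Size([4, 8, 32])
print(expanded.data_ptr() == layer_stm.data_ptr())   # True: shared storage, not a copy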