rxnn 0.2.59__py3-none-any.whl → 0.2.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rxnn/memory/attention.py +0 -1
- rxnn/transformers/models.py +0 -1
- {rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/METADATA +1 -1
- {rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/RECORD +6 -6
- {rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/LICENSE +0 -0
- {rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/WHEEL +0 -0
rxnn/memory/attention.py
CHANGED
@@ -49,7 +49,6 @@ class StmMemoryAttention(nn.Module):
 
     def forward(self, x: torch.Tensor, attention_mask: torch.Tensor = None) -> torch.Tensor:
         if attention_mask is not None:
-            print(attention_mask.size())
             attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).bool()
         new_stm = torch.zeros_like(self.stm.memory)
         for i in range(self.num_layers):
rxnn/transformers/models.py
CHANGED
@@ -108,7 +108,6 @@ class ReactiveTransformerEncoder(ReactiveTransformerBase):
     def forward(self, x: torch.Tensor, attention_mask: torch.Tensor = None) -> tuple[torch.Tensor, torch.Tensor]:
         x = super().forward(x)  # apply embeddings
         if attention_mask is not None:
-            print(attention_mask.size())
             attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).bool()
 
         hidden_states = []
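The only change in both files is the removal of a leftover debug print of the attention-mask shape; the line that follows it reshapes a per-token padding mask so it broadcasts over attention heads and query positions. A minimal standalone sketch of that reshape, with illustrative tensor sizes that are assumptions rather than values taken from the package:

import torch

# Illustrative only: a [batch, seq_len] padding mask (1 = real token, 0 = padding).
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 0]])

# Same reshape as in both forward() methods above:
# [batch, seq_len] -> [batch, 1, 1, seq_len], so the mask broadcasts against
# attention scores of shape [batch, heads, query_len, key_len].
mask = attention_mask.unsqueeze(1).unsqueeze(1).bool()
print(mask.shape)  # torch.Size([2, 1, 1, 5])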
{rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/RECORD
CHANGED
@@ -5,7 +5,7 @@ rxnn/experimental/attention.py,sha256=jlNS82INjycNEfmk3HtkIacUvT_ELhaCO2g-kZTvhX
 rxnn/experimental/models.py,sha256=oJWd56LUsLc9S8eCZw-ShvuWjoQxj4C9GitbohlQ0ok,5139
 rxnn/experimental/moe.py,sha256=jHZ1QhpWiVQOswVpFmuH7b2IUOPf0Uuf-I2Ddwsd7Us,6140
 rxnn/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rxnn/memory/attention.py,sha256=
+rxnn/memory/attention.py,sha256=t-SWJhQ71TV8X_8I_yp0Cr5df7fnWxI-EnYiN8gjpok,3268
 rxnn/memory/norm.py,sha256=cVjjhCLqR5K6-321SP_ObG17y-ddlcTJeCTXvW4vpk0,6675
 rxnn/memory/stm.py,sha256=jv57gsH9XW19sLbxpRDqsp1yfsii_4Ef4Ncr_ztk-i4,3937
 rxnn/rxt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -28,12 +28,12 @@ rxnn/transformers/attention.py,sha256=KRnKT6XUqAXElxV9y72mSpdTeiMgCKCCLqqxCFNTHm
 rxnn/transformers/ff.py,sha256=WDjO-H9XWInoWnUnxiseIH6Kx5GlHP0zGJygwhcb1gc,2589
 rxnn/transformers/layers.py,sha256=OlbqD5kKygn5WZziLbU3jZjhr8hBrxLpqlCjJ_BNCW0,8119
 rxnn/transformers/mask.py,sha256=J0cfLVLt3SzS2ra3KcY4khrkhI975Dw4CjpUi3Sn25s,419
-rxnn/transformers/models.py,sha256=
+rxnn/transformers/models.py,sha256=tT0W5inG4EtjEHNutG77Wcws2fJzLJs-iFDP3hX3D2Q,10761
 rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
 rxnn/transformers/positional.py,sha256=1PjcJybUzeQlIKJI4tahAGZcYgCRCL0otxs7mpsNuzM,4410
 rxnn/transformers/sampler.py,sha256=t6iiQTdLQ0TakUWnnhKkb5DKF2F_9-thXHBydDF3fxg,17389
 rxnn/utils.py,sha256=ihb6OTyDtPiocB_lOvnq7eOkjjpCkgs8wxvXUBNQ7mM,996
-rxnn-0.2.
-rxnn-0.2.
-rxnn-0.2.
-rxnn-0.2.
+rxnn-0.2.60.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+rxnn-0.2.60.dist-info/METADATA,sha256=x_juLxld_xGztBqC7bbBWTn4llwNuZtu29xyjz4uiX8,25997
+rxnn-0.2.60.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+rxnn-0.2.60.dist-info/RECORD,,
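For context on the RECORD entries above: each row has the form path,sha256=DIGEST,SIZE, where the digest is an unpadded URL-safe base64 SHA-256 of the file and the size is in bytes. A rough sketch of recomputing one such entry for a local file (the path here is purely illustrative, not a claim about the wheel's contents):

import base64
import hashlib
import os

# Hypothetical local file used only for illustration.
path = "rxnn/memory/attention.py"

with open(path, "rb") as f:
    data = f.read()

# Wheel RECORD files use URL-safe base64 without trailing '=' padding.
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
print(f"{path},sha256={digest},{os.path.getsize(path)}")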
{rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/LICENSE
File without changes
{rxnn-0.2.59.dist-info → rxnn-0.2.60.dist-info}/WHEEL
File without changes