rxnn-0.2.0.tar.gz → rxnn-0.2.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {rxnn-0.2.0 → rxnn-0.2.2}/PKG-INFO +1 -1
  2. {rxnn-0.2.0 → rxnn-0.2.2}/pyproject.toml +1 -1
  3. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/memory/stm.py +3 -10
  4. {rxnn-0.2.0 → rxnn-0.2.2}/LICENSE +0 -0
  5. {rxnn-0.2.0 → rxnn-0.2.2}/README.md +0 -0
  6. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/.DS_Store +0 -0
  7. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/memory/attention.py +0 -0
  14. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/rxt/__init__.py +0 -0
  16. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/rxt/models.py +0 -0
  17. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/__init__.py +0 -0
  18. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/base.py +0 -0
  19. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/bml.py +0 -0
  20. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/callbacks.py +0 -0
  21. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/dataset.py +0 -0
  22. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/models.py +0 -0
  23. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/mrl.py +0 -0
  24. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/reward.py +0 -0
  25. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/rl.py +0 -0
  26. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/scheduler.py +0 -0
  27. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/tokenizer.py +0 -0
  28. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/training/utils.py +0 -0
  29. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/__init__.py +0 -0
  30. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/attention.py +0 -0
  31. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/ff.py +0 -0
  32. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/layers.py +0 -0
  33. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/mask.py +0 -0
  34. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/models.py +0 -0
  35. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/moe.py +0 -0
  36. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/positional.py +0 -0
  37. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/transformers/sampler.py +0 -0
  38. {rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/utils.py +0 -0
{rxnn-0.2.0 → rxnn-0.2.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.2.0
+Version: 0.2.2
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
{rxnn-0.2.0 → rxnn-0.2.2}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.2.0"
+version = "0.2.2"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
{rxnn-0.2.0 → rxnn-0.2.2}/src/rxnn/memory/stm.py
@@ -5,7 +5,7 @@ class ShortTermMemory(nn.Module):
     """Short-term memory module for the Attention-based Memory System"""
 
     def __init__(self, num_layers: int, embed_dim: int, stm_size: int, init_type: str = 'normal',
-                 is_trainable: bool = False, legacy_init: bool = True, *args, **kwargs):
+                 is_trainable: bool = False, *args, **kwargs):
         super(ShortTermMemory, self).__init__(*args, **kwargs)
         self.num_layers = num_layers
         self.embed_dim = embed_dim
@@ -20,13 +20,10 @@ class ShortTermMemory(nn.Module):
             self.memory = nn.Parameter(stm)
         else:
             self.register_buffer('memory', stm)
-        # Legacy init - temporary option to load old models with not-batched STM (they will be loaded, updated and then the option will be removed)
-        self.legacy_init = legacy_init
 
     def _init_tensor(self, init_type: str = None):
         init_type = init_type or self.init_type
-        stm_shape = (self.num_layers, self.stm_size, self.embed_dim) \
-            if self.legacy_init else (self.num_layers, self.batch_size, self.stm_size, self.embed_dim)
+        stm_shape = (self.num_layers, self.batch_size, self.stm_size, self.embed_dim)
         if init_type == 'normal':
             return torch.normal(0, 0.02, stm_shape)
         elif init_type == 'standard':
@@ -38,12 +35,8 @@ class ShortTermMemory(nn.Module):
         else:
             return torch.zeros(*stm_shape)
 
-    def reset_legacy_(self):
-        self.legacy_init = False
-        self.memory = self._init_tensor()
-
     def forward(self, layer: int) -> torch.Tensor:
-        return self.memory[layer].unsqueeze(0) if self.legacy_init else self.memory[layer]
+        return self.memory[layer]
 
     def update_layer(self, layer: int, new_stm: torch.Tensor):
         self.memory[layer] = new_stm
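
The only substantive code change in this release is the stm.py diff above: the temporary legacy_init path (kept to load old models with a non-batched STM) is removed, along with the reset_legacy_() migration helper. Short-term memory is now always stored batched, with shape (num_layers, batch_size, stm_size, embed_dim), and forward(layer) returns the batched slice directly instead of unsqueezing a non-batched one. A minimal usage sketch of the resulting 0.2.2 behavior (the constructor arguments come from the diff; the assumption that batch_size defaults to 1 is ours, since its initialization falls outside the visible hunks):

import torch
from rxnn.memory.stm import ShortTermMemory

# Signature as shown in the 0.2.2 diff; the legacy_init flag is gone.
stm = ShortTermMemory(num_layers=4, embed_dim=256, stm_size=16)

# forward(layer) returns the batched slice directly:
# shape (batch_size, stm_size, embed_dim). Under 0.2.0's legacy mode the
# non-batched (stm_size, embed_dim) slice was unsqueezed to a batch of 1.
layer_state = stm(0)
print(layer_state.shape)  # torch.Size([1, 16, 256]), assuming batch_size defaults to 1

# update_layer() writes a batched state back for a single layer.
stm.update_layer(0, torch.zeros_like(layer_state))

Note that because reset_legacy_() is removed rather than deprecated, the in-place migration path for non-batched 0.2.0 checkpoints no longer exists in 0.2.2; per the removed comment, that option was always intended to be temporary.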