rxnn 0.2.35__tar.gz → 0.2.36__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {rxnn-0.2.35 → rxnn-0.2.36}/PKG-INFO +1 -1
  2. {rxnn-0.2.35 → rxnn-0.2.36}/pyproject.toml +1 -1
  3. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/rl.py +3 -3
  4. {rxnn-0.2.35 → rxnn-0.2.36}/LICENSE +0 -0
  5. {rxnn-0.2.35 → rxnn-0.2.36}/README.md +0 -0
  6. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/.DS_Store +0 -0
  7. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/memory/attention.py +0 -0
  14. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/callbacks.py +0 -0
  22. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/dataset.py +0 -0
  23. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/ddp.py +0 -0
  24. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/models.py +0 -0
  25. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/mrl.py +0 -0
  26. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/reward.py +0 -0
  27. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/scheduler.py +0 -0
  28. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/tokenizer.py +0 -0
  29. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/utils.py +0 -0
  30. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/__init__.py +0 -0
  31. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/attention.py +0 -0
  32. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/ff.py +0 -0
  33. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/layers.py +0 -0
  34. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/mask.py +0 -0
  35. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/models.py +0 -0
  36. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/moe.py +0 -0
  37. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/positional.py +0 -0
  38. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/transformers/sampler.py +0 -0
  39. {rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/utils.py +0 -0
{rxnn-0.2.35 → rxnn-0.2.36}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.2.35
+Version: 0.2.36
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
{rxnn-0.2.35 → rxnn-0.2.36}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.2.35"
+version = "0.2.36"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
{rxnn-0.2.35 → rxnn-0.2.36}/src/rxnn/training/rl.py
@@ -107,12 +107,12 @@ class PPOAlgorithm(RlAlgorithm):
         return policy_loss
 
     def _compute_gae(self, rewards: torch.Tensor, values: torch.Tensor,
-                     last_value: torch.Tensor, dones: torch.Tensor):
+                     last_value: torch.Tensor, dones: torch.Tensor, last_done: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
         trajectory_len, batch_size = rewards.shape
         advantages = torch.zeros_like(rewards, device=rewards.device)
         last_advantage = 0
         next_value = last_value
-        next_done = torch.zeros(batch_size, device=dones.device) # Last state is terminal
+        next_done = last_done
         dones = dones.float()
 
         for t in reversed(range(trajectory_len)):
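This hunk stops hard-coding the assumption that the collected trajectory ends in a terminal state (the old code set next_done to zeros despite its comment) and instead bootstraps from the actual done flag of the last step. The sketch below shows the standard GAE recursion and where that flag enters; the function name, the gamma and gae_lambda defaults, and everything not visible in the hunk are illustrative assumptions, not the exact rxnn implementation.

import torch

# Illustrative sketch of generalized advantage estimation (GAE).
# Shapes follow the hunk above: rewards/values/dones are [trajectory_len, batch_size],
# last_value/last_done are [batch_size]; gamma and gae_lambda are assumed hyperparameters.
def compute_gae_sketch(rewards: torch.Tensor, values: torch.Tensor,
                       last_value: torch.Tensor, dones: torch.Tensor,
                       last_done: torch.Tensor, gamma: float = 0.99,
                       gae_lambda: float = 0.95) -> tuple[torch.Tensor, torch.Tensor]:
    trajectory_len, _ = rewards.shape
    advantages = torch.zeros_like(rewards)
    last_advantage = torch.zeros_like(last_value)
    next_value = last_value
    next_done = last_done.float()  # 0.2.36 change: use the real terminal flag instead of zeros
    dones = dones.float()
    for t in reversed(range(trajectory_len)):
        not_done = 1.0 - next_done
        # TD error: the bootstrap value is masked out when the next state is terminal
        delta = rewards[t] + gamma * next_value * not_done - values[t]
        # carry the advantage backwards, cutting it off at episode boundaries
        last_advantage = delta + gamma * gae_lambda * not_done * last_advantage
        advantages[t] = last_advantage
        next_value = values[t]
        next_done = dones[t]
    returns = advantages + values  # value targets for the critic
    return advantages, returns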
@@ -131,7 +131,7 @@ class PPOAlgorithm(RlAlgorithm):
         return advantages, returns
 
     def calculate_advantages(self, rewards: torch.Tensor, values: torch.Tensor, dones: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
-        advantages, ref_values = self._compute_gae(rewards[:-1], values[:-1], values[-1], dones[:-1])
+        advantages, ref_values = self._compute_gae(rewards[:-1], values[:-1], values[-1], dones[:-1], dones[-1])
         if self.use_distributed_advantage_norm:
             mean_advantage = distributed_mean(advantages.mean())
             std_advantage = distributed_mean(advantages.std())
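The call-site change in calculate_advantages then simply forwards dones[-1] alongside the bootstrap value values[-1], so the trailing step of the collected trajectory supplies both. A hypothetical call of the sketch above, with made-up shapes:

# Hypothetical data: trajectory_len + 1 = 8 steps collected for a batch of 4 environments.
rewards = torch.randn(8, 4)
values = torch.randn(8, 4)
dones = torch.randint(0, 2, (8, 4))

# The last step provides the bootstrap value and, since 0.2.36, its done flag as well.
adv, ret = compute_gae_sketch(rewards[:-1], values[:-1], values[-1],
                              dones[:-1], dones[-1])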