rxnn 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rxnn/training/rl.py
CHANGED
@@ -107,12 +107,12 @@ class PPOAlgorithm(RlAlgorithm):
         return policy_loss

     def _compute_gae(self, rewards: torch.Tensor, values: torch.Tensor,
-                     last_value: torch.Tensor, dones: torch.Tensor):
+                     last_value: torch.Tensor, dones: torch.Tensor, last_done: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
         trajectory_len, batch_size = rewards.shape
         advantages = torch.zeros_like(rewards, device=rewards.device)
         last_advantage = 0
         next_value = last_value
-        next_done =
+        next_done = last_done.float()
         dones = dones.float()

         for t in reversed(range(trajectory_len)):
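The loop body that follows (outside this hunk) is where the new last_done argument matters: it supplies the done flag for the step just past the trajectory, so the bootstrap from last_value can be masked when the final transition ended an episode. Below is a minimal, self-contained sketch of a GAE recursion with that masking; the helper name compute_gae_sketch and the hyperparameter names gamma and gae_lambda are assumptions for illustration, not the package's exact implementation.

import torch

def compute_gae_sketch(rewards, values, last_value, dones, last_done,
                       gamma: float = 0.99, gae_lambda: float = 0.95):
    # rewards, values, dones: [trajectory_len, batch_size]
    # last_value, last_done: [batch_size], the bootstrap value and done flag
    # for the step immediately after the trajectory.
    trajectory_len, batch_size = rewards.shape
    advantages = torch.zeros_like(rewards)
    last_advantage = torch.zeros(batch_size, device=rewards.device)
    next_value = last_value
    next_done = last_done.float()
    dones = dones.float()

    for t in reversed(range(trajectory_len)):
        # Mask the bootstrap value and the carried advantage whenever the
        # next step belongs to a new episode.
        not_done = 1.0 - next_done
        delta = rewards[t] + gamma * next_value * not_done - values[t]
        last_advantage = delta + gamma * gae_lambda * not_done * last_advantage
        advantages[t] = last_advantage
        next_value = values[t]
        next_done = dones[t]

    returns = advantages + values
    return advantages, returns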
@@ -131,7 +131,7 @@ class PPOAlgorithm(RlAlgorithm):
         return advantages, returns

     def calculate_advantages(self, rewards: torch.Tensor, values: torch.Tensor, dones: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
-        advantages, ref_values = self._compute_gae(rewards[:-1], values[:-1], values[-1], dones[:-1])
+        advantages, ref_values = self._compute_gae(rewards[:-1], values[:-1], values[-1], dones[:-1], dones[-1])
         if self.use_distributed_advantage_norm:
             mean_advantage = distributed_mean(advantages.mean())
             std_advantage = distributed_mean(advantages.std())
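The updated call above implies that rewards, values, and dones each carry one trailing bootstrap step, with values[-1] and now dones[-1] peeled off for the lookahead. A hedged usage example of that layout, reusing the compute_gae_sketch helper from the sketch above with made-up tensors (the shapes are assumptions, not taken from the package):

# Hypothetical shapes: 8 trajectory steps plus one trailing bootstrap step, batch of 4.
T, B = 8, 4
rewards = torch.randn(T + 1, B)
values = torch.randn(T + 1, B)
dones = torch.randint(0, 2, (T + 1, B)).bool()

# Mirrors the updated call: the trailing entries supply the bootstrap value and done flag.
advantages, returns = compute_gae_sketch(
    rewards[:-1], values[:-1], values[-1], dones[:-1], dones[-1]
)
print(advantages.shape, returns.shape)  # torch.Size([8, 4]) torch.Size([8, 4])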
rxnn-0.2.35.dist-info/RECORD → rxnn-0.2.37.dist-info/RECORD
CHANGED
@@ -19,7 +19,7 @@ rxnn/training/ddp.py,sha256=VsNBjn3cY-uUj8hbsW7oKvb0_ZKnXnJ2KgObm-Mr9i4,836
 rxnn/training/models.py,sha256=y-9XHedSheyK1AmLBp3ayulnUvAmDuJ3t0qVg8wHBRg,7463
 rxnn/training/mrl.py,sha256=fIrg1Er0aAK4TnyDRmJC1m7az9wdkhikxv0CBCrGT-c,55868
 rxnn/training/reward.py,sha256=B7nerPk9eNAv2i7umtNF88tVQVwijNNrchIrEITGHKk,11623
-rxnn/training/rl.py,sha256=
+rxnn/training/rl.py,sha256=47wxFeUSHSqc1dKEEy8skTcNHDqNuthsYTGA-HeUbhg,5982
 rxnn/training/scheduler.py,sha256=LcjU35mEwz2U5x3U6tLfeeYlBqMxbFSxYzJYuXkWbSY,1408
 rxnn/training/tokenizer.py,sha256=umaLByMBx_NMrQElA45HLm9gkuzyKWDTFaKVd-CjXl0,8344
 rxnn/training/utils.py,sha256=Bw8nZLKIt7NQpUVCYkb_79kWKChVFOYgYXwODo4SvNc,5718
@@ -33,7 +33,7 @@ rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
 rxnn/transformers/positional.py,sha256=1PjcJybUzeQlIKJI4tahAGZcYgCRCL0otxs7mpsNuzM,4410
 rxnn/transformers/sampler.py,sha256=t6iiQTdLQ0TakUWnnhKkb5DKF2F_9-thXHBydDF3fxg,17389
 rxnn/utils.py,sha256=ihb6OTyDtPiocB_lOvnq7eOkjjpCkgs8wxvXUBNQ7mM,996
-rxnn-0.2.
-rxnn-0.2.
-rxnn-0.2.
-rxnn-0.2.
+rxnn-0.2.37.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+rxnn-0.2.37.dist-info/METADATA,sha256=GXBCyK-3ALJw6TpVk7rJ7Z2uyFSq8u8N-TYpmQaeUE8,25960
+rxnn-0.2.37.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+rxnn-0.2.37.dist-info/RECORD,,
Files without changes