evolutionary-policy-optimization 0.0.40-py3-none-any.whl → 0.0.41-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutionary_policy_optimization/epo.py +13 -1
- {evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/METADATA +22 -1
- {evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/RECORD +5 -5
- {evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/WHEEL +0 -0
- {evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/licenses/LICENSE +0 -0
evolutionary_policy_optimization/epo.py

@@ -798,6 +798,10 @@ class Agent(Module):
 
         latents = self.latent_gene_pool(latent_id = latent_gene_ids)
 
+        orig_latents = latents
+        latents = latents.detach()
+        latents.requires_grad_()
+
         # learn actor
 
         logits = self.actor(states, latents)
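The three added lines cut the latents out of the gene pool's autograd graph while still capturing the gradient that flows into them: `detach()` severs the graph, and `requires_grad_()` turns the detached copy into a leaf that accumulates `.grad` during the actor/critic backward passes. A minimal sketch of the pattern, with `pool_out` as a hypothetical stand-in for the gene pool's output:

```python
import torch

# hypothetical stand-in for the output of the latent gene pool
pool_out = torch.randn(4, 32, requires_grad = True)

orig_latents = pool_out                # keep a handle into the pool's graph
latents = orig_latents.detach()        # sever the graph at the latents
latents.requires_grad_()               # make the detached copy a leaf that records .grad

loss = latents.sum()                   # stand-in for the actor / critic losses
loss.backward()

assert latents.grad is not None        # gradient w.r.t. the latents was captured here
assert pool_out.grad is None           # ...without yet touching the pool's graph
```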
@@ -822,6 +826,14 @@ class Agent(Module):
         self.critic_optim.step()
         self.critic_optim.zero_grad()
 
+        # maybe update latents, if not frozen
+
+        if not self.latent_gene_pool.frozen_latents:
+            orig_latents.backward(latents.grad)
+
+            self.latent_optim.step()
+            self.latent_optim.zero_grad()
+
         # apply evolution
 
         self.latent_gene_pool.genetic_algorithm_step(fitness_scores)
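Together with the earlier hunk, this gives a two-stage backward: the actor and critic losses deposit their gradient on the detached `latents`, and `orig_latents.backward(latents.grad)` then relays that gradient into the gene pool's graph so a separate latent optimizer can step, skipped entirely when the latents are frozen. A self-contained sketch of the hand-off, where `pool_param`, `frozen_latents`, and `latent_optim` are illustrative stand-ins rather than names from the library:

```python
import torch

frozen_latents = False                                 # mirrors latent_gene_pool.frozen_latents

pool_param = torch.nn.Parameter(torch.randn(4, 32))    # stand-in for the gene pool's weights
latent_optim = torch.optim.Adam([pool_param], lr = 1e-3)

orig_latents = pool_param * 2.                         # stand-in for the pool's forward pass
latents = orig_latents.detach()
latents.requires_grad_()

latents.sum().backward()                               # stage 1: losses fill latents.grad

if not frozen_latents:
    orig_latents.backward(latents.grad)                # stage 2: relay gradient into the pool's graph

    latent_optim.step()
    latent_optim.zero_grad()
```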
@@ -951,7 +963,7 @@ class EPO(Module):
 
         done = tensor(False)
 
-        while time < self.max_episode_length:
+        while time < self.max_episode_length and not done:
 
             # sample action
 
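The old loop only checked the step budget, so a rollout kept stepping the environment after the episode had already terminated; adding `and not done` exits as soon as termination is reported. A toy illustration, with `ToyEnv` as a hypothetical environment that terminates on its first step:

```python
import torch
from torch import tensor

class ToyEnv:
    """Hypothetical env that terminates on the very first step."""
    def reset(self):
        return torch.zeros(4)

    def step(self, action):
        return torch.zeros(4), tensor(1.), tensor(True)   # state, reward, done

env = ToyEnv()
state = env.reset()

time, max_episode_length = 0, 500
done = tensor(False)

while time < max_episode_length and not done:   # the new guard: stop once the episode ends
    state, reward, done = env.step(tensor(0))
    time += 1

assert time == 1   # without `and not done`, this would have run all 500 steps
```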
{evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.0.40
+Version: 0.0.41
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
@@ -162,4 +162,25 @@ agent.load('./agent.pt')
 }
 ```
 
+```bibtex
+@inproceedings{Khadka2018EvolutionGuidedPG,
+    title     = {Evolution-Guided Policy Gradient in Reinforcement Learning},
+    author    = {Shauharda Khadka and Kagan Tumer},
+    booktitle = {Neural Information Processing Systems},
+    year      = {2018},
+    url       = {https://api.semanticscholar.org/CorpusID:53096951}
+}
+```
+
+```bibtex
+@article{Fortunato2017NoisyNF,
+    title   = {Noisy Networks for Exploration},
+    author  = {Meire Fortunato and Mohammad Gheshlaghi Azar and Bilal Piot and Jacob Menick and Ian Osband and Alex Graves and Vlad Mnih and R{\'e}mi Munos and Demis Hassabis and Olivier Pietquin and Charles Blundell and Shane Legg},
+    journal = {ArXiv},
+    year    = {2017},
+    volume  = {abs/1706.10295},
+    url     = {https://api.semanticscholar.org/CorpusID:5176587}
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
{evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/RECORD

@@ -1,8 +1,8 @@
 evolutionary_policy_optimization/__init__.py,sha256=0q0aBuFgWi06MLMD8FiHzBYQ3_W4LYWrwmCtF3u5H2A,201
-evolutionary_policy_optimization/epo.py,sha256=…
+evolutionary_policy_optimization/epo.py,sha256=GL3nH5crOj4y_Amu2BY0s95MJL7F2t-X085y40SgUK0,30260
 evolutionary_policy_optimization/experimental.py,sha256=9FrJGviLESlYysHI3i83efT9g2ZB9ha4u3K9HXN98_w,1100
 evolutionary_policy_optimization/mock_env.py,sha256=QqVPZVJtrvQmSDcnYDTob_A5sDwiUzGj6_tmo6BII5c,918
-evolutionary_policy_optimization-0.0.40.dist-info/METADATA,sha256=…
-evolutionary_policy_optimization-0.0.40.dist-info/WHEEL,sha256=…
-evolutionary_policy_optimization-0.0.40.dist-info/licenses/LICENSE,sha256=…
-evolutionary_policy_optimization-0.0.40.dist-info/RECORD,,
+evolutionary_policy_optimization-0.0.41.dist-info/METADATA,sha256=TFKI2B2PeyU6pHwqmCu130k-U2Li_QmUkvVB39-4uDw,6213
+evolutionary_policy_optimization-0.0.41.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+evolutionary_policy_optimization-0.0.41.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+evolutionary_policy_optimization-0.0.41.dist-info/RECORD,,
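For reference, each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 SHA-256 of the file bytes with trailing `=` padding stripped (per the wheel spec). A small sketch that recomputes a row, so entries like the `epo.py` one above can be checked against an unpacked wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_row(path):
    # RECORD rows are `path,sha256=<digest>,<size>`; the digest is the
    # urlsafe-base64 SHA-256 of the file bytes, `=` padding stripped
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=').decode()
    return f'{path},sha256={digest},{len(data)}'

# e.g. run against the unpacked 0.0.41 wheel, this should reproduce the row above:
# record_row('evolutionary_policy_optimization/epo.py')
# -> 'evolutionary_policy_optimization/epo.py,sha256=GL3nH5c...,30260'
```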
{evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/WHEEL: file without changes
{evolutionary_policy_optimization-0.0.40.dist-info → evolutionary_policy_optimization-0.0.41.dist-info}/licenses/LICENSE: file without changes