evolutionary-policy-optimization 0.0.40__tar.gz → 0.0.42__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/PKG-INFO +22 -1
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/README.md +21 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/epo.py +24 -5
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/pyproject.toml +1 -1
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/tests/test_epo.py +8 -2
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.github/workflows/python-publish.yml +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.github/workflows/test.yml +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.gitignore +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/LICENSE +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/__init__.py +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/experimental.py +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/mock_env.py +0 -0
- {evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/requirements.txt +0 -0
{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.0.40
+Version: 0.0.42
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
@@ -162,4 +162,25 @@ agent.load('./agent.pt')
 }
 ```
 
+```bibtex
+@inproceedings{Khadka2018EvolutionGuidedPG,
+    title = {Evolution-Guided Policy Gradient in Reinforcement Learning},
+    author = {Shauharda Khadka and Kagan Tumer},
+    booktitle = {Neural Information Processing Systems},
+    year = {2018},
+    url = {https://api.semanticscholar.org/CorpusID:53096951}
+}
+```
+
+```bibtex
+@article{Fortunato2017NoisyNF,
+    title = {Noisy Networks for Exploration},
+    author = {Meire Fortunato and Mohammad Gheshlaghi Azar and Bilal Piot and Jacob Menick and Ian Osband and Alex Graves and Vlad Mnih and R{\'e}mi Munos and Demis Hassabis and Olivier Pietquin and Charles Blundell and Shane Legg},
+    journal = {ArXiv},
+    year = {2017},
+    volume = {abs/1706.10295},
+    url = {https://api.semanticscholar.org/CorpusID:5176587}
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/README.md
RENAMED
@@ -110,4 +110,25 @@ agent.load('./agent.pt')
 }
 ```
 
+```bibtex
+@inproceedings{Khadka2018EvolutionGuidedPG,
+    title = {Evolution-Guided Policy Gradient in Reinforcement Learning},
+    author = {Shauharda Khadka and Kagan Tumer},
+    booktitle = {Neural Information Processing Systems},
+    year = {2018},
+    url = {https://api.semanticscholar.org/CorpusID:53096951}
+}
+```
+
+```bibtex
+@article{Fortunato2017NoisyNF,
+    title = {Noisy Networks for Exploration},
+    author = {Meire Fortunato and Mohammad Gheshlaghi Azar and Bilal Piot and Jacob Menick and Ian Osband and Alex Graves and Vlad Mnih and R{\'e}mi Munos and Demis Hassabis and Olivier Pietquin and Charles Blundell and Shane Legg},
+    journal = {ArXiv},
+    year = {2017},
+    volume = {abs/1706.10295},
+    url = {https://api.semanticscholar.org/CorpusID:5176587}
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/epo.py
RENAMED
@@ -798,6 +798,10 @@ class Agent(Module):
 
         latents = self.latent_gene_pool(latent_id = latent_gene_ids)
 
+        orig_latents = latents
+        latents = latents.detach()
+        latents.requires_grad_()
+
         # learn actor
 
         logits = self.actor(states, latents)
@@ -822,6 +826,14 @@ class Agent(Module):
         self.critic_optim.step()
         self.critic_optim.zero_grad()
 
+        # maybe update latents, if not frozen
+
+        if not self.latent_gene_pool.frozen_latents:
+            orig_latents.backward(latents.grad)
+
+            self.latent_optim.step()
+            self.latent_optim.zero_grad()
+
         # apply evolution
 
         self.latent_gene_pool.genetic_algorithm_step(fitness_scores)
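The two epo.py hunks above form one mechanism: the latents sampled from the gene pool are detached before the actor and critic are trained, so those backward passes only deposit a gradient on the detached copy; afterwards that gradient is pushed back into the pool with `orig_latents.backward(latents.grad)`, and the latent optimizer steps only when the pool is not frozen. A minimal PyTorch sketch of this detach-and-reattach pattern (illustrative only; a plain `nn.Embedding` stands in for `LatentGenePool`, and all names here are assumptions, not the library's):

```python
import torch
from torch import nn

# stand-ins for LatentGenePool and its optimizer (assumed, not the library's code)
gene_pool = nn.Embedding(16, 32)
latent_optim = torch.optim.Adam(gene_pool.parameters(), lr = 1e-3)
frozen_latents = False

orig_latents = gene_pool(torch.tensor([3]))   # differentiable w.r.t. the pool weights

latents = orig_latents.detach()               # cut the graph back to the pool
latents.requires_grad_()                      # collect a gradient on the copy instead

# stand-in for the actor / critic losses computed on the detached latent

loss = (latents * torch.randn_like(latents)).sum()
loss.backward()                               # populates latents.grad only

if not frozen_latents:
    orig_latents.backward(latents.grad)       # route the gradient back into the pool
    latent_optim.step()
    latent_optim.zero_grad()
```

Keeping the latent update on its own optimizer makes skipping it a single `if` when `frozen_latents` is set, without touching the actor or critic updates.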
@@ -863,25 +875,32 @@ def create_agent(
     actor_num_actions,
     actor_dim_hiddens: int | tuple[int, ...],
     critic_dim_hiddens: int | tuple[int, ...],
+    latent_gene_pool_kwargs: dict = dict(),
+    actor_kwargs: dict = dict(),
+    critic_kwargs: dict = dict(),
 ) -> Agent:
 
     latent_gene_pool = LatentGenePool(
         num_latents = num_latents,
-        dim_latent = dim_latent
+        dim_latent = dim_latent,
+        **latent_gene_pool_kwargs
     )
 
     actor = Actor(
         num_actions = actor_num_actions,
         dim_state = dim_state,
         dim_latent = dim_latent,
-        dim_hiddens = actor_dim_hiddens
+        dim_hiddens = actor_dim_hiddens,
+        **actor_kwargs
     )
 
     critic = Critic(
         dim_state = dim_state,
         dim_latent = dim_latent,
-        dim_hiddens = critic_dim_hiddens
-
+        dim_hiddens = critic_dim_hiddens,
+        **critic_kwargs
+    )
+
     return Agent(actor = actor, critic = critic, latent_gene_pool = latent_gene_pool)
 
 # EPO - which is just PPO with natural selection of a population of latent variables conditioning the agent
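`create_agent` now forwards optional keyword dicts straight into the `LatentGenePool`, `Actor` and `Critic` constructors. A hedged usage sketch based on the signature above (only `frozen_latents` is taken from this diff; the numeric values are illustrative):

```python
from evolutionary_policy_optimization import create_agent

agent = create_agent(
    dim_state = 512,                   # illustrative sizes
    num_latents = 16,
    dim_latent = 32,
    actor_num_actions = 5,
    actor_dim_hiddens = (256, 128),
    critic_dim_hiddens = (256, 128, 64),
    latent_gene_pool_kwargs = dict(
        frozen_latents = True          # keep the gene pool fixed during gradient updates
    ),
    actor_kwargs = dict(),             # any extra Actor / Critic options would go here
    critic_kwargs = dict()
)
```

Passing per-module options through plain dicts keeps the `create_agent` signature small while still exposing constructor arguments such as `frozen_latents`.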
@@ -951,7 +970,7 @@ class EPO(Module):
 
         done = tensor(False)
 
-        while time < self.max_episode_length:
+        while time < self.max_episode_length and not done:
 
             # sample action
 
{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/tests/test_epo.py
RENAMED
@@ -73,7 +73,10 @@ def test_create_agent(
     agent.save('./agent.pt', overwrite = True)
     agent.load('./agent.pt')
 
-def test_e2e_with_mock_env():
+@pytest.mark.parametrize('frozen_latents', (False, True))
+def test_e2e_with_mock_env(
+    frozen_latents
+):
     from evolutionary_policy_optimization import create_agent, EPO, Env
 
     agent = create_agent(
@@ -82,7 +85,10 @@ def test_e2e_with_mock_env():
         dim_latent = 32,
         actor_num_actions = 5,
         actor_dim_hiddens = (256, 128),
-        critic_dim_hiddens = (256, 128, 64)
+        critic_dim_hiddens = (256, 128, 64),
+        latent_gene_pool_kwargs = dict(
+            frozen_latents = frozen_latents
+        )
     )
 
     epo = EPO(
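The end-to-end test is now parametrized over `frozen_latents`, so both the frozen and learned gene-pool code paths above are exercised. Assuming a standard pytest setup, `pytest tests/test_epo.py -k test_e2e_with_mock_env` should run both cases.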
{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.github/workflows/python-publish.yml
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.github/workflows/test.yml
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/.gitignore
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/LICENSE
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/__init__.py
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/experimental.py
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/evolutionary_policy_optimization/mock_env.py
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.40 → evolutionary_policy_optimization-0.0.42}/requirements.txt
RENAMED
File without changes