evolutionary-policy-optimization 0.0.1 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
+ # This workflow will upload a Python Package using Twine when a release is created
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+ # This workflow uses actions that are not certified by GitHub.
+ # They are provided by a third-party and are governed by
+ # separate terms of service, privacy policy, and support
+ # documentation.
+
+ name: Upload Python Package
+
+ on:
+   release:
+     types: [published]
+
+ jobs:
+   deploy:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/checkout@v2
+     - name: Set up Python
+       uses: actions/setup-python@v2
+       with:
+         python-version: '3.x'
+     - name: Install dependencies
+       run: |
+         python -m pip install --upgrade pip
+         pip install build
+     - name: Build package
+       run: python -m build
+     - name: Publish package
+       uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+       with:
+         user: __token__
+         password: ${{ secrets.PYPI_API_TOKEN }}
@@ -0,0 +1,174 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,103 @@
+ Metadata-Version: 2.4
+ Name: evolutionary-policy-optimization
+ Version: 0.0.1
+ Summary: EPO - Pytorch
+ Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
+ Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
+ Author-email: Phil Wang <lucidrains@gmail.com>
+ License: MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Keywords: artificial intelligence,deep learning,genetic algorithms,reinforcement learning,robotics
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.8
+ Requires-Dist: einops>=0.8.0
+ Requires-Dist: torch>=2.2
+ Requires-Dist: tqdm
+ Provides-Extra: examples
+ Requires-Dist: numpy; extra == 'examples'
+ Requires-Dist: pufferlib>=2.0.6; extra == 'examples'
+ Requires-Dist: tqdm; extra == 'examples'
+ Provides-Extra: examples-gym
+ Requires-Dist: box2d-py; extra == 'examples-gym'
+ Requires-Dist: gymnasium[box2d]>=1.0.0; extra == 'examples-gym'
+ Requires-Dist: tqdm; extra == 'examples-gym'
+ Description-Content-Type: text/markdown
+
+ <img width="450px" alt="fig1" src="https://github.com/user-attachments/assets/33bef569-e786-4f09-bdee-56bad7ea9e6d" />
+
+ ## Evolutionary Policy Optimization (wip)
+
+ Pytorch implementation of [Evolutionary Policy Optimization](https://web3.arxiv.org/abs/2503.19037), from Wang et al. of the Robotics Institute at Carnegie Mellon University.
+
+ This paper stands out, as I have witnessed the positive effects firsthand in an [exploratory project](https://github.com/lucidrains/firefly-torch) (mixing evolution with gradient-based methods). Perhaps the AlexNet moment for genetic algorithms has not come to pass yet.
+
+ Besides their latent variable method, I'll also throw in some attempts at crossover in weight space.
+
+ ## Usage
+
+ ```python
+ import torch
+
+ from evolutionary_policy_optimization import (
+     LatentGenePool,
+     MLP
+ )
+
+ latent_pool = LatentGenePool(
+     num_latents = 32,
+     dim_latent = 32,
+     net = MLP(
+         dims = (512, 256),
+         dim_latent = 32,
+     )
+ )
+
+ state = torch.randn(1, 512)
+ action = latent_pool(state, latent_id = 3) # use latent / gene 4
+
+ # interact with the environment and receive rewards, termination, etc.
+
+ # derive a fitness score for each gene / latent
+
+ fitness = torch.randn(32)
+
+ latent_pool.genetic_algorithm_step(fitness) # update latents with one generation of the genetic algorithm
+
+ ```
+
+ ## Citations
+
+ ```bibtex
+ @inproceedings{Wang2025EvolutionaryPO,
+     title = {Evolutionary Policy Optimization},
+     author = {Jianren Wang and Yifan Su and Abhinav Gupta and Deepak Pathak},
+     year = {2025},
+     url = {https://api.semanticscholar.org/CorpusID:277313729}
+ }
+ ```
+
+ *Evolution is cleverer than you are.* - Leslie Orgel
@@ -0,0 +1,54 @@
+ <img width="450px" alt="fig1" src="https://github.com/user-attachments/assets/33bef569-e786-4f09-bdee-56bad7ea9e6d" />
+
+ ## Evolutionary Policy Optimization (wip)
+
+ Pytorch implementation of [Evolutionary Policy Optimization](https://web3.arxiv.org/abs/2503.19037), from Wang et al. of the Robotics Institute at Carnegie Mellon University.
+
+ This paper stands out, as I have witnessed the positive effects firsthand in an [exploratory project](https://github.com/lucidrains/firefly-torch) (mixing evolution with gradient-based methods). Perhaps the AlexNet moment for genetic algorithms has not come to pass yet.
+
+ Besides their latent variable method, I'll also throw in some attempts at crossover in weight space.
+
+ ## Usage
+
+ ```python
+ import torch
+
+ from evolutionary_policy_optimization import (
+     LatentGenePool,
+     MLP
+ )
+
+ latent_pool = LatentGenePool(
+     num_latents = 32,
+     dim_latent = 32,
+     net = MLP(
+         dims = (512, 256),
+         dim_latent = 32,
+     )
+ )
+
+ state = torch.randn(1, 512)
+ action = latent_pool(state, latent_id = 3) # use latent / gene 4
+
+ # interact with the environment and receive rewards, termination, etc.
+
+ # derive a fitness score for each gene / latent
+
+ fitness = torch.randn(32)
+
+ latent_pool.genetic_algorithm_step(fitness) # update latents with one generation of the genetic algorithm
+
+ ```
+
+ ## Citations
+
+ ```bibtex
+ @inproceedings{Wang2025EvolutionaryPO,
+     title = {Evolutionary Policy Optimization},
+     author = {Jianren Wang and Yifan Su and Abhinav Gupta and Deepak Pathak},
+     year = {2025},
+     url = {https://api.semanticscholar.org/CorpusID:277313729}
+ }
+ ```
+
+ *Evolution is cleverer than you are.* - Leslie Orgel
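
To make the `fitness` placeholder in the usage example above concrete, here is a rough sketch (an illustration, not part of the released package) of how per-latent fitness could be collected from environment rollouts, assuming the optional `gymnasium` dependency and a greedy action taken from the network output:

```python
import torch
import gymnasium as gym

from evolutionary_policy_optimization import LatentGenePool, MLP

env = gym.make('CartPole-v1')  # 4-dim observation, 2 discrete actions

latent_pool = LatentGenePool(
    num_latents = 32,
    dim_latent = 32,
    net = MLP(dims = (4, 64, 2), dim_latent = 32)
)

fitness = torch.zeros(32)

# one rollout per latent / gene, total episode reward used as its fitness

for latent_id in range(32):
    obs, _ = env.reset(seed = latent_id)
    done = False

    while not done:
        state = torch.from_numpy(obs).float().unsqueeze(0)
        logits = latent_pool(state, latent_id = latent_id)
        action = logits.argmax(dim = -1).item()

        obs, reward, terminated, truncated, _ = env.step(action)
        fitness[latent_id] += reward
        done = terminated or truncated

latent_pool.genetic_algorithm_step(fitness)  # one generation
```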
@@ -0,0 +1,4 @@
+ from evolutionary_policy_optimization.epo import (
+     MLP,
+     LatentGenePool
+ )
@@ -0,0 +1,326 @@
+ from __future__ import annotations
+
+ import torch
+ from torch import nn, cat
+ import torch.nn.functional as F
+
+ from torch.nn import Linear, Module, ModuleList
+
+ from einops import rearrange, repeat
+
+ # helpers
+
+ def exists(v):
+     return v is not None
+
+ def default(v, d):
+     return v if exists(v) else d
+
+ def identity(t):
+     return t
+
+ def xnor(x, y):
+     return not (x ^ y)
+
+ def l2norm(t):
+     return F.normalize(t, p = 2, dim = -1)
+
+ # tensor helpers
+
+ def log(t, eps = 1e-20):
+     return t.clamp(min = eps).log()
+
+ def calc_entropy(logits):
+     prob = logits.softmax(dim = -1)
+     return (-prob * log(prob)).sum(dim = -1)
+
+ def gather_log_prob(
+     logits,  # Float[b l]
+     indices  # Int[b]
+ ):           # Float[b]
+     indices = rearrange(indices, '... -> ... 1')
+     log_probs = logits.log_softmax(dim = -1)
+     log_prob = log_probs.gather(-1, indices)
+     return rearrange(log_prob, '... 1 -> ...')
+
+ # reinforcement learning related - ppo
+
+ def actor_loss(
+     logits,        # Float[b l]
+     old_log_probs, # Float[b]
+     actions,       # Int[b]
+     advantages,    # Float[b]
+     eps_clip = 0.2,
+     entropy_weight = .01,
+ ):
+     log_probs = gather_log_prob(logits, actions)
+
+     entropy = calc_entropy(logits)
+
+     ratio = (log_probs - old_log_probs).exp()
+
+     clipped_ratio = ratio.clamp(min = 1. - eps_clip, max = 1. + eps_clip)
+
+     # classic clipped surrogate loss from ppo
+
+     actor_loss = -torch.min(clipped_ratio * advantages, ratio * advantages)
+
+     # add entropy loss for exploration
+
+     entropy_aux_loss = -entropy_weight * entropy
+
+     return actor_loss + entropy_aux_loss
+
+ def critic_loss(
+     pred_values, # Float[b]
+     advantages,  # Float[b]
+     old_values   # Float[b]
+ ):
+     discounted_values = advantages + old_values
+     return F.mse_loss(pred_values, discounted_values)
+
+ # evolution related functions
+
+ def crossover_latents(
+     parent1, parent2,
+     weight = None,
+     random = False,
+     l2norm_output = False
+ ):
+     assert parent1.shape == parent2.shape
+
+     if random:
+         assert not exists(weight)
+         weight = torch.randn_like(parent1).sigmoid()
+     else:
+         weight = default(weight, 0.5) # the paper does a simple averaging of the latents as crossover, but allow for random interpolation as well, since this work is extended with tournament selection, where the same pair of parents may be re-selected
+
+     child = torch.lerp(parent1, parent2, weight)
+
+     if not l2norm_output:
+         return child
+
+     return l2norm(child)
+
+ def mutation(
+     latents,
+     mutation_strength = 1.,
+     l2norm_output = False
+ ):
+     mutations = torch.randn_like(latents)
+
+     mutated = latents + mutations * mutation_strength
+
+     if not l2norm_output:
+         return mutated
+
+     return l2norm(mutated)
+
+ # simple MLP networks, but with latent variables
+ # the latent variables are the "genes" with the rest of the network as the scaffold for "gene expression" - as suggested in the paper
+
+ class MLP(Module):
+     def __init__(
+         self,
+         dims: tuple[int, ...],
+         dim_latent = 0,
+     ):
+         super().__init__()
+         assert len(dims) >= 2, 'must have at least two dimensions'
+
+         # add the latent to the first dim
+
+         first_dim, *rest_dims = dims
+         first_dim += dim_latent
+         dims = (first_dim, *rest_dims)
+
+         self.dim_latent = dim_latent
+         self.needs_latent = dim_latent > 0
+
+         self.encode_latent = nn.Sequential(
+             Linear(dim_latent, dim_latent),
+             nn.SiLU()
+         ) if self.needs_latent else None
+
+         # pairs of dimension
+
+         dim_pairs = tuple(zip(dims[:-1], dims[1:]))
+
+         # modules across layers
+
+         layers = ModuleList([Linear(dim_in, dim_out) for dim_in, dim_out in dim_pairs])
+
+         self.layers = layers
+
+     def forward(
+         self,
+         x,
+         latent = None
+     ):
+         assert xnor(self.needs_latent, exists(latent))
+
+         if exists(latent):
+             # start with naive concatenative conditioning
+             # but will also offer some alternatives once a spark is seen (film, adaptive linear from stylegan, etc)
+
+             batch = x.shape[0]
+
+             latent = self.encode_latent(latent)
+             latent = repeat(latent, 'd -> b d', b = batch)
+
+             x = cat((x, latent), dim = -1)
+
+         # layers
+
+         for ind, layer in enumerate(self.layers, start = 1):
+             is_last = ind == len(self.layers)
+
+             x = layer(x)
+
+             if not is_last:
+                 x = F.silu(x)
+
+         return x
+
+ # classes
+
+ class LatentGenePool(Module):
+     def __init__(
+         self,
+         num_latents,                  # same as gene pool size
+         dim_latent,                   # gene dimension
+         crossover_random = True,      # random interp from parent1 to parent2 for crossover, set to `False` for averaging (0.5 constant value)
+         l2norm_latent = False,        # whether to enforce latents on hypersphere
+         frac_tournaments = 0.25,      # fraction of genes to participate in tournament - the lower the value, the more chance a less fit gene could be selected
+         frac_natural_selected = 0.25, # fraction of the fittest genes that survive natural selection each generation
+         frac_elitism = 0.1,           # frac of population to preserve from being noised
+         mutation_strength = 1.,       # factor to multiply to gaussian noise as mutation to latents
+         net: MLP | Module | dict | None = None,
+     ):
+         super().__init__()
+
+         maybe_l2norm = l2norm if l2norm_latent else identity
+
+         latents = torch.randn(num_latents, dim_latent)
+
+         if l2norm_latent:
+             latents = maybe_l2norm(latents)
+
+         self.latents = nn.Parameter(latents, requires_grad = False)
+
+         self.maybe_l2norm = maybe_l2norm
+
+         # some derived values
+
+         assert 0. < frac_tournaments < 1.
+         assert 0. < frac_natural_selected < 1.
+         assert 0. <= frac_elitism < 1.
+         assert (frac_natural_selected + frac_elitism) < 1.
+
+         self.dim_latent = dim_latent
+         self.num_latents = num_latents
+         self.num_natural_selected = int(frac_natural_selected * num_latents)
+
+         self.num_tournament_participants = int(frac_tournaments * self.num_natural_selected)
+         self.crossover_random = crossover_random
+
+         self.mutation_strength = mutation_strength
+         self.num_elites = int(frac_elitism * num_latents)
+         self.has_elites = self.num_elites > 0
+
+         # network for the latent / gene
+
+         if isinstance(net, dict):
+             net = MLP(**net)
+
+         if exists(net):
+             assert net.dim_latent == dim_latent, f'the latent dimension set on the MLP ({net.dim_latent}) must match what was passed into the latent gene pool module ({dim_latent})'
+
+         self.net = net
+
+     # non-gradient optimization, at least not on the individual level (that is taken care of by the rl component)
+
+     @torch.no_grad()
+     def genetic_algorithm_step(
+         self,
+         fitness, # Float['p']
+         inplace = True
+     ):
+         """
+         p - population
+         g - gene dimension
+         """
+
+         genes = self.latents # the latents are the genes
+
+         pop_size = genes.shape[0]
+         assert pop_size == fitness.shape[0]
+
+         # 1. natural selection is simple in silico
+         # sort the population by fitness and slice off the least fit end
+
+         sorted_indices = fitness.sort().indices
+         natural_selected_indices = sorted_indices[-self.num_natural_selected:]
+         genes, fitness = genes[natural_selected_indices], fitness[natural_selected_indices]
+
+         # 2. to find pairs of parents for replenishing the gene pool, go with the popular tournament strategy
+
+         batch_randperm = torch.randn((pop_size - self.num_natural_selected, self.num_tournament_participants)).argsort(dim = -1)
+
+         participants = genes[batch_randperm]
+         participant_fitness = fitness[batch_randperm]
+
+         tournament_winner_indices = participant_fitness.topk(2, dim = -1).indices
+
+         tournament_winner_indices = repeat(tournament_winner_indices, '... -> ... g', g = self.dim_latent)
+
+         parents = participants.gather(-2, tournament_winner_indices)
+
+         # 3. do a crossover of the parents - the paper went with simple averaging, but since we are doing tournament style and the same pair of parents may be re-selected, use random interpolation instead
+
+         parent1, parent2 = parents.unbind(dim = 1)
+         children = crossover_latents(parent1, parent2, random = self.crossover_random)
+
+         # append children to gene pool
+
+         genes = cat((children, genes))
+
+         # 4. use the elitism strategy to protect the best performing genes from being changed
+
+         if self.has_elites:
+             genes, elites = genes[:-self.num_elites], genes[-self.num_elites:]
+
+         # 5. mutate with gaussian noise - todo: add drawing the mutation rate from an exponential distribution, from the fast genetic algorithms paper from 2017
+
+         genes = mutation(genes, mutation_strength = self.mutation_strength)
+
+         # add back the elites
+
+         if self.has_elites:
+             genes = cat((genes, elites))
+
+         genes = self.maybe_l2norm(genes)
+
+         if not inplace:
+             return genes
+
+         # store the genes for the next interaction with the environment, for new fitness values (a function of reward and other to-be-researched measures)
+
+         self.latents.copy_(genes)
+
+     def forward(
+         self,
+         *args,
+         latent_id: int,
+         **kwargs,
+     ):
+         assert exists(self.net)
+
+         assert 0 <= latent_id < self.num_latents
+
+         latent = self.latents[latent_id]
+
+         return self.net(
+             *args,
+             latent = latent,
+             **kwargs
+         )
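
The `actor_loss` / `critic_loss` helpers above are not re-exported from the package `__init__`; the following minimal sketch (an illustration, not package code) imports them directly from the `evolutionary_policy_optimization.epo` module and exercises them on dummy tensors to show the expected shapes:

```python
import torch
from evolutionary_policy_optimization.epo import actor_loss, critic_loss

batch, num_actions = 8, 4

logits = torch.randn(batch, num_actions, requires_grad = True)  # current policy
old_logits = torch.randn(batch, num_actions)                    # behavior policy at rollout time

actions = torch.randint(0, num_actions, (batch,))
old_log_probs = old_logits.log_softmax(dim = -1).gather(-1, actions[:, None]).squeeze(-1)

advantages = torch.randn(batch)
old_values = torch.randn(batch)
pred_values = torch.randn(batch, requires_grad = True)

policy_loss = actor_loss(logits, old_log_probs, actions, advantages).mean()  # per-sample loss, reduced
value_loss = critic_loss(pred_values, advantages, old_values)                # already a scalar mse

(policy_loss + value_loss).backward()
```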
@@ -0,0 +1,27 @@
+ import torch
+
+ def crossover_weights(w1, w2, transpose = False):
+     assert w1.shape == w2.shape
+     assert w1.ndim == 2
+
+     if transpose:
+         w1, w2 = w1.t(), w2.t()
+
+     rank = min(w2.shape)
+     assert rank >= 2
+
+     u1, s1, v1 = torch.svd(w1)
+     u2, s2, v2 = torch.svd(w2)
+
+     mask = torch.randperm(rank) < (rank // 2)
+
+     u = torch.where(mask[None, :], u1, u2)
+     s = torch.where(mask, s1, s2)
+     v = torch.where(mask[None, :], v1, v2)
+
+     out = u @ torch.diag_embed(s) @ v.mT
+
+     if transpose:
+         out = out.t()
+
+     return out
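
As a usage illustration (not part of the package), the SVD-based crossover above can be applied to the weight matrices of two parent `nn.Linear` layers to produce a child layer, assuming `crossover_weights` is in scope:

```python
import torch
from torch import nn

# two parent layers with identical shapes
parent1 = nn.Linear(512, 256)
parent2 = nn.Linear(512, 256)

with torch.no_grad():
    # mix singular directions from both parents into one weight matrix
    child_weight = crossover_weights(parent1.weight, parent2.weight)

    child = nn.Linear(512, 256)
    child.weight.copy_(child_weight)
```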
@@ -0,0 +1,66 @@
+ [project]
+ name = "evolutionary-policy-optimization"
+ version = "0.0.1"
+ description = "EPO - Pytorch"
+ authors = [
+     { name = "Phil Wang", email = "lucidrains@gmail.com" }
+ ]
+ readme = "README.md"
+ requires-python = ">= 3.8"
+ license = { file = "LICENSE" }
+ keywords = [
+     'artificial intelligence',
+     'deep learning',
+     'reinforcement learning',
+     'genetic algorithms',
+     'robotics'
+ ]
+ classifiers = [
+     'Development Status :: 4 - Beta',
+     'Intended Audience :: Developers',
+     'Topic :: Scientific/Engineering :: Artificial Intelligence',
+     'License :: OSI Approved :: MIT License',
+     'Programming Language :: Python :: 3.8',
+ ]
+
+ dependencies = [
+     'einops>=0.8.0',
+     'torch>=2.2',
+     'tqdm'
+ ]
+
+ [project.urls]
+ Homepage = "https://pypi.org/project/evolutionary-policy-optimization/"
+ Repository = "https://github.com/lucidrains/evolutionary-policy-optimization"
+
+ [project.optional-dependencies]
+ examples = [
+     "numpy",
+     "pufferlib>=2.0.6",
+     "tqdm",
+ ]
+
+ examples_gym = [
+     "box2d-py",
+     "gymnasium[box2d]>=1.0.0",
+     "tqdm",
+ ]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.rye]
+ managed = true
+ dev-dependencies = [
+     "ruff>=0.4.2",
+     "pytest>=8.2.0",
+     "pytest-examples>=0.0.10",
+     "pytest-cov>=5.0.0",
+ ]
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["evolutionary_policy_optimization"]