evolutionary-policy-optimization 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutionary_policy_optimization/epo.py +53 -0
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/METADATA +11 -1
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/RECORD +5 -5
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/WHEEL +0 -0
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/licenses/LICENSE +0 -0
evolutionary_policy_optimization/epo.py

@@ -167,6 +167,8 @@ def shrink_and_perturb_(

     assert 0. <= shrink_factor <= 1.

+    maybe_sync_seed()
+
     for p in module.parameters():
         noise = torch.randn_like(p.data)
         p.data.mul_(1. - shrink_factor).add_(noise * perturb_factor)
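The added `maybe_sync_seed()` runs before the perturbation noise is sampled, presumably (imported from the package's `distributed.py`) so that every distributed rank draws identical `randn` noise and the shrink-and-perturb update stays in lockstep across processes. A purely illustrative sketch of what such a seed-sync helper could look like, not the package's actual implementation:

```python
import torch
import torch.distributed as dist

def sync_seed_sketch():
    # outside of a distributed run there is nothing to synchronize
    if not (dist.is_available() and dist.is_initialized()):
        return

    # rank 0 picks a seed and broadcasts it, so every rank seeds its RNG identically
    seed = torch.randint(0, 2 ** 31 - 1, (1,))
    dist.broadcast(seed, src = 0)
    torch.manual_seed(seed.item())
```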
@@ -382,6 +384,53 @@ class StateNorm(Module):

         return normed

+# style mapping network from StyleGAN2
+# https://arxiv.org/abs/1912.04958
+
+class EqualLinear(Module):
+    def __init__(
+        self,
+        dim_in,
+        dim_out,
+        lr_mul = 1,
+        bias = True
+    ):
+        super().__init__()
+        self.lr_mul = lr_mul
+
+        self.weight = nn.Parameter(torch.randn(dim_out, dim_in))
+        self.bias = nn.Parameter(torch.zeros(dim_out))
+
+    def forward(
+        self,
+        input
+    ):
+        weight, bias = tuple(t * self.lr_mul for t in (self.weight, self.bias))
+        return F.linear(input, weight, bias = bias)
+
+class LatentMappingNetwork(Module):
+    def __init__(
+        self,
+        dim_latent,
+        depth,
+        lr_mul = 0.1,
+        leaky_relu_p = 2e-2
+    ):
+        super().__init__()
+
+        layers = []
+
+        for i in range(depth):
+            layers.extend([
+                EqualLinear(dim_latent, dim_latent, lr_mul),
+                nn.LeakyReLU(leaky_relu_p)
+            ])
+
+        self.net = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.net(x)
+
 # simple MLP networks, but with latent variables
 # the latent variables are the "genes" with the rest of the network as the scaffold for "gene expression" - as suggested in the paper

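`EqualLinear` multiplies both weight and bias by `lr_mul` at forward time (the equalized learning-rate trick from StyleGAN2), and `LatentMappingNetwork` stacks `depth` of these blocks with `LeakyReLU` in between, preserving the latent dimension. A minimal usage sketch, assuming the two classes are importable from `evolutionary_policy_optimization.epo` as added above:

```python
import torch
from evolutionary_policy_optimization.epo import LatentMappingNetwork

mapper = LatentMappingNetwork(dim_latent = 32, depth = 2)  # 2 x (EqualLinear + LeakyReLU)
genes = torch.randn(4, 32)                                 # a batch of 4 latent "genes"
mapped = mapper(genes)                                     # shape preserved: (4, 32)
```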
@@ -391,6 +440,7 @@ class MLP(Module):
         dim,
         depth,
         dim_latent = 0,
+        latent_mapping_network_depth = 2,
         expansion_factor = 2.
     ):
         super().__init__()
@@ -401,6 +451,7 @@ class MLP(Module):
         self.needs_latent = dim_latent > 0

         self.encode_latent = nn.Sequential(
+            LatentMappingNetwork(dim_latent, depth = latent_mapping_network_depth),
             Linear(dim_latent, dim * 2),
             nn.SiLU()
         ) if self.needs_latent else None
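Taken together, an `MLP` constructed with `dim_latent > 0` now routes the latent through the mapping network before the `Linear` + `SiLU` projection in `encode_latent`. A construction sketch using only the keyword arguments visible in these two hunks:

```python
from evolutionary_policy_optimization.epo import MLP

mlp = MLP(
    dim = 256,
    depth = 4,
    dim_latent = 32,                    # > 0 enables the latent ("gene") pathway
    latent_mapping_network_depth = 2    # new in 0.2.8: depth of the StyleGAN2-style mapper
)
```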
@@ -803,6 +854,8 @@ class LatentGenePool(Module):
         inplace = True,
         migrate = None # trigger a migration in the setting of multiple islands, the loop outside will need to have some `migrate_every` hyperparameter
     ):
+        maybe_sync_seed()
+
         device = self.latents.device

         if not divisible_by(self.step.item(), self.apply_genetic_algorithm_every):
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.2.6
+Version: 0.2.8
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
@@ -268,4 +268,14 @@ That's it
 }
 ```

+```bibtex
+@article{Karras2019stylegan2,
+    title = {Analyzing and Improving the Image Quality of {StyleGAN}},
+    author = {Tero Karras and Samuli Laine and Miika Aittala and Janne Hellsten and Jaakko Lehtinen and Timo Aila},
+    journal = {CoRR},
+    volume = {abs/1912.04958},
+    year = {2019},
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/RECORD

@@ -1,10 +1,10 @@
 evolutionary_policy_optimization/__init__.py,sha256=NyiYDYU7DlpmOTM7xiBQET3r1WwX0ebrgMCBLSQrW3c,288
 evolutionary_policy_optimization/distributed.py,sha256=MxyxqxANAuOm8GYb0Yu09EHd_aVLhK2uwgrfuVWciPU,2342
 evolutionary_policy_optimization/env_wrappers.py,sha256=bDL06o9_b1iW6k3fw2xifnOnYlzs643tdW6Yv2gsIdw,803
-evolutionary_policy_optimization/epo.py,sha256=
+evolutionary_policy_optimization/epo.py,sha256=San0DS_Z0Ueze6Rz1BKap7SKFww1bx3U0WansTRprdo,53076
 evolutionary_policy_optimization/experimental.py,sha256=7LOrMIaU4fr2Vme1ZpHNIvlvFEIdWj0-uemhQoNJcPQ,5549
 evolutionary_policy_optimization/mock_env.py,sha256=TLyyRm6tOD0Kdn9QqJJQriaSnsR-YmNQHo4OohmZFG4,1410
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
+evolutionary_policy_optimization-0.2.8.dist-info/METADATA,sha256=PcsHBD4zX1EsV-kWFJIR5fsLxX6hXFPOF4FJyfaI9oU,9171
+evolutionary_policy_optimization-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+evolutionary_policy_optimization-0.2.8.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+evolutionary_policy_optimization-0.2.8.dist-info/RECORD,,
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.8.dist-info}/WHEEL and /licenses/LICENSE: file without changes