evolutionary-policy-optimization 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evolutionary_policy_optimization/epo.py +48 -0
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/METADATA +11 -1
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/RECORD +5 -5
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/WHEEL +0 -0
- {evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/licenses/LICENSE +0 -0
evolutionary_policy_optimization/epo.py

@@ -382,6 +382,52 @@ class StateNorm(Module):
 
         return normed
 
+# style mapping network from StyleGAN2
+
+class EqualLinear(Module):
+    def __init__(
+        self,
+        dim_in,
+        dim_out,
+        lr_mul = 1,
+        bias = True
+    ):
+        super().__init__()
+        self.lr_mul = lr_mul
+
+        self.weight = nn.Parameter(torch.randn(dim_out, dim_in))
+        self.bias = nn.Parameter(torch.zeros(dim_out))
+
+    def forward(
+        self,
+        input
+    ):
+        weight, bias = tuple(t * self.lr_mul for t in (self.weight, self.bias))
+        return F.linear(input, weight, bias = bias)
+
+class LatentMappingNetwork(Module):
+    def __init__(
+        self,
+        dim_latent,
+        depth,
+        lr_mul = 0.1,
+        leaky_relu_p = 2e-2
+    ):
+        super().__init__()
+
+        layers = []
+
+        for i in range(depth):
+            layers.extend([
+                EqualLinear(dim_latent, dim_latent, lr_mul),
+                nn.LeakyReLU(leaky_relu_p)
+            ])
+
+        self.net = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.net(x)
+
 # simple MLP networks, but with latent variables
 # the latent variables are the "genes" with the rest of the network as the scaffold for "gene expression" - as suggested in the paper
 
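The hunk above brings over the StyleGAN2-style latent mapping network. Below is a minimal sketch of how the two new classes behave, assuming they remain importable as module-level definitions from `evolutionary_policy_optimization.epo` (the import path is an assumption, not something shown in this diff):

```python
# illustrative only: exercises the two classes added in this diff
import torch
from evolutionary_policy_optimization.epo import EqualLinear, LatentMappingNetwork  # assumed import path

# EqualLinear multiplies weight and bias by lr_mul inside forward,
# so a small lr_mul shrinks the effective step size of these layers
layer = EqualLinear(dim_in = 128, dim_out = 128, lr_mul = 0.1)

# LatentMappingNetwork stacks `depth` EqualLinear + LeakyReLU blocks,
# keeping the latent dimension unchanged
mapper = LatentMappingNetwork(dim_latent = 128, depth = 2)

latents = torch.randn(16, 128)   # a batch of latent "genes"
mapped = mapper(latents)         # shape preserved: (16, 128)
assert mapped.shape == latents.shape
```

The `lr_mul` factor applied at forward time is the usual equalized learning rate trick from StyleGAN2: the mapping layers train with an effectively smaller step size than the rest of the network.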
@@ -391,6 +437,7 @@ class MLP(Module):
         dim,
         depth,
         dim_latent = 0,
+        latent_mapping_network_depth = 2,
         expansion_factor = 2.
     ):
         super().__init__()
@@ -401,6 +448,7 @@ class MLP(Module):
         self.needs_latent = dim_latent > 0
 
         self.encode_latent = nn.Sequential(
+            LatentMappingNetwork(dim_latent, depth = latent_mapping_network_depth),
             Linear(dim_latent, dim * 2),
             nn.SiLU()
         ) if self.needs_latent else None
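Taken together, the two MLP hunks mean that when `dim_latent > 0`, the latent is now passed through the new mapping network before the pre-existing `Linear(dim_latent, dim * 2)` projection, with the mapping depth exposed as a constructor argument (`latent_mapping_network_depth`, default 2). A rough sketch of the resulting `encode_latent` pipeline, using illustrative dimensions and the same assumed import path as above:

```python
# composes the same modules the updated MLP wires into self.encode_latent
import torch
from torch import nn
from torch.nn import Linear
from evolutionary_policy_optimization.epo import LatentMappingNetwork  # assumed import path

dim, dim_latent = 512, 128       # illustrative sizes

encode_latent = nn.Sequential(
    LatentMappingNetwork(dim_latent, depth = 2),  # new in 0.2.7: StyleGAN2-style mapping first
    Linear(dim_latent, dim * 2),                  # then the existing projection to 2 * dim
    nn.SiLU()
)

latent = torch.randn(4, dim_latent)
conditioning = encode_latent(latent)   # shape: (4, 1024)
```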
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.2.6
+Version: 0.2.7
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization

@@ -268,4 +268,14 @@ That's it
 }
 ```
 
+```bibtex
+@article{Karras2019stylegan2,
+    title   = {Analyzing and Improving the Image Quality of {StyleGAN}},
+    author  = {Tero Karras and Samuli Laine and Miika Aittala and Janne Hellsten and Jaakko Lehtinen and Timo Aila},
+    journal = {CoRR},
+    volume  = {abs/1912.04958},
+    year    = {2019},
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/RECORD

@@ -1,10 +1,10 @@
 evolutionary_policy_optimization/__init__.py,sha256=NyiYDYU7DlpmOTM7xiBQET3r1WwX0ebrgMCBLSQrW3c,288
 evolutionary_policy_optimization/distributed.py,sha256=MxyxqxANAuOm8GYb0Yu09EHd_aVLhK2uwgrfuVWciPU,2342
 evolutionary_policy_optimization/env_wrappers.py,sha256=bDL06o9_b1iW6k3fw2xifnOnYlzs643tdW6Yv2gsIdw,803
-evolutionary_policy_optimization/epo.py,sha256=
+evolutionary_policy_optimization/epo.py,sha256=OKumVrOH7DSKZhbbx-5oCI_JJwJNYf4lrEpCNmbj6ZY,52991
 evolutionary_policy_optimization/experimental.py,sha256=7LOrMIaU4fr2Vme1ZpHNIvlvFEIdWj0-uemhQoNJcPQ,5549
 evolutionary_policy_optimization/mock_env.py,sha256=TLyyRm6tOD0Kdn9QqJJQriaSnsR-YmNQHo4OohmZFG4,1410
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
-evolutionary_policy_optimization-0.2.
+evolutionary_policy_optimization-0.2.7.dist-info/METADATA,sha256=MiG_AYp6KoANhdGuaGM37-zaciW8dCrO0KvkXk7hO7w,9171
+evolutionary_policy_optimization-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+evolutionary_policy_optimization-0.2.7.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+evolutionary_policy_optimization-0.2.7.dist-info/RECORD,,

{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/WHEEL: file without changes
{evolutionary_policy_optimization-0.2.6.dist-info → evolutionary_policy_optimization-0.2.7.dist-info}/licenses/LICENSE: file without changes