evolutionary-policy-optimization 0.0.50.tar.gz → 0.0.52.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/PKG-INFO +1 -1
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/evolutionary_policy_optimization/epo.py +7 -3
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/pyproject.toml +1 -1
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/tests/test_epo.py +2 -1
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/.github/workflows/python-publish.yml +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/.github/workflows/test.yml +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/.gitignore +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/LICENSE +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/README.md +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/evolutionary_policy_optimization/__init__.py +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/evolutionary_policy_optimization/experimental.py +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/evolutionary_policy_optimization/mock_env.py +0 -0
- {evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/requirements.txt +0 -0
{evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.0.50
+Version: 0.0.52
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
{evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/evolutionary_policy_optimization/epo.py
RENAMED

@@ -48,6 +48,9 @@ def divisible_by(num, den):
 def l2norm(t):
     return F.normalize(t, p = 2, dim = -1)
 
+def batch_randperm(shape, device):
+    return torch.randn(shape, device = device).argsort(dim = -1)
+
 def log(t, eps = 1e-20):
     return t.clamp(min = eps).log()
 
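The new batch_randperm helper draws a batch of independent random permutations in one call: sample i.i.d. Gaussian noise of the requested shape and argsort it along the last dimension. A minimal standalone sketch of the same trick (the demo shapes below are illustrative, not taken from the library):

import torch

def batch_randperm(shape, device):
    # argsort of i.i.d. noise yields an unbiased random permutation per row
    return torch.randn(shape, device = device).argsort(dim = -1)

perms = batch_randperm((4, 10), device = 'cpu')
print(perms.shape)                   # torch.Size([4, 10])
print(perms.sort(dim = -1).values)   # every row sorts back to 0..9, i.e. a valid permutation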
@@ -393,7 +396,6 @@ class LatentGenePool(Module):
 
         latents_per_island = num_latents // num_islands
         self.num_natural_selected = int(frac_natural_selected * latents_per_island)
-
         self.num_tournament_participants = int(frac_tournaments * self.num_natural_selected)
 
         self.crossover_random = crossover_random
@@ -530,7 +532,9 @@ class LatentGenePool(Module):
 
         # 2. for finding pairs of parents to replete gene pool, we will go with the popular tournament strategy
 
-
+        tournament_shape = (islands, pop_size_per_island - self.num_natural_selected, self.num_natural_selected) # (island, num children needed, natural selected population to be bred)
+
+        rand_tournament_gene_ids = batch_randperm(tournament_shape, device)[..., :tournament_participants]
         rand_tournament_gene_ids_for_gather = rearrange(rand_tournament_gene_ids, 'i p t -> i (p t)')
 
         participant_fitness = fitness.gather(1, rand_tournament_gene_ids_for_gather)
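The added lines build one shuffled ordering of the surviving parents per child to be bred, keep only the first tournament_participants indices of each shuffle, and gather the corresponding fitness values. The sketch below shows how such a batched tournament can be completed; the final topk step that lets the two fittest participants become parents is an assumption about what follows this hunk, and all sizes are made up:

import torch
from einops import rearrange

def batch_randperm(shape, device):
    return torch.randn(shape, device = device).argsort(dim = -1)

islands, pop_size_per_island, num_natural_selected, tournament_participants = 2, 8, 4, 3
device = 'cpu'

fitness = torch.rand(islands, num_natural_selected)        # fitness of the selected parents per island
num_children = pop_size_per_island - num_natural_selected

tournament_shape = (islands, num_children, num_natural_selected)
rand_tournament_gene_ids = batch_randperm(tournament_shape, device)[..., :tournament_participants]

ids_for_gather = rearrange(rand_tournament_gene_ids, 'i p t -> i (p t)')
participant_fitness = fitness.gather(1, ids_for_gather)
participant_fitness = rearrange(participant_fitness, 'i (p t) -> i p t', t = tournament_participants)

# assumed completion of the tournament: the two fittest participants of each tournament breed
parent_ids = rand_tournament_gene_ids.gather(-1, participant_fitness.topk(2, dim = -1).indices)
print(parent_ids.shape)   # (islands, num_children, 2)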
@@ -901,7 +905,7 @@ class Agent(Module):
 
         if self.has_diversity_loss:
             diversity = self.latent_gene_pool.get_distance()
-            diversity_loss = diversity.
+            diversity_loss = (-diversity).tril(-1).exp().mean()
 
             (diversity_loss * self.diversity_aux_loss_weight).backward()
 
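The diversity auxiliary loss is reworked to exponentiate the negated pairwise latent distances, keep only the strictly lower triangle so each pair is counted once, and average. Minimizing it penalizes latent genes that sit close together. A self-contained sketch, under the assumption that get_distance() returns a symmetric matrix of pairwise distances between the latent genes (the exact metric is not shown in this diff):

import torch
import torch.nn.functional as F

# hedged stand-in for latent_gene_pool.get_distance(): pairwise squared distances
# between L2-normalized latent genes
latents = torch.randn(8, 32, requires_grad = True)
normed = F.normalize(latents, dim = -1)
diversity = ((normed.unsqueeze(1) - normed.unsqueeze(0)) ** 2).sum(dim = -1)  # (8, 8)

# updated auxiliary loss from the diff: exp(-distance) over the strictly lower triangle,
# averaged. close pairs contribute values near 1, distant pairs decay toward 0, so
# minimizing the mean pushes the latent genes apart; positions zeroed by tril(-1)
# become exp(0) = 1, a constant that contributes no gradient
diversity_loss = (-diversity).tril(-1).exp().mean()

(diversity_loss * 1e-3).backward()   # 1e-3 standing in for diversity_aux_loss_weight
print(latents.grad.norm())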
{evolutionary_policy_optimization-0.0.50 → evolutionary_policy_optimization-0.0.52}/tests/test_epo.py
RENAMED

@@ -78,7 +78,8 @@ def test_create_agent(
 @pytest.mark.parametrize('diversity_aux_loss_weight', (0., 1e-3))
 def test_e2e_with_mock_env(
     frozen_latents,
-    use_critic_ema
+    use_critic_ema,
+    diversity_aux_loss_weight
 ):
     from evolutionary_policy_optimization import create_agent, EPO, Env
 
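The accompanying test fix addresses a plain pytest rule: every argument name handed to @pytest.mark.parametrize must also appear in the test function's signature, otherwise collection errors out. A minimal illustration (the test name here is made up):

import pytest

@pytest.mark.parametrize('diversity_aux_loss_weight', (0., 1e-3))
def test_diversity_weight_is_forwarded(diversity_aux_loss_weight):
    # the parametrized name must be accepted by the test, as in the fixed signature above
    assert diversity_aux_loss_weight in (0., 1e-3)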
All remaining files (.github/workflows/python-publish.yml, .github/workflows/test.yml, .gitignore, LICENSE, README.md, evolutionary_policy_optimization/__init__.py, evolutionary_policy_optimization/experimental.py, evolutionary_policy_optimization/mock_env.py, requirements.txt) were renamed with the new version prefix and contain no changes.