evolutionary-policy-optimization 0.0.11__tar.gz → 0.0.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/PKG-INFO +1 -1
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/evolutionary_policy_optimization/__init__.py +1 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/evolutionary_policy_optimization/epo.py +55 -4
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/pyproject.toml +1 -1
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/tests/test_epo.py +26 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.github/workflows/python-publish.yml +0 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.github/workflows/test.yml +0 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.gitignore +0 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/LICENSE +0 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/README.md +0 -0
- {evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/evolutionary_policy_optimization/experimental.py +0 -0
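At a glance, 0.0.14 adds a `create_agent` factory and three convenience methods on `Agent` (`get_actor_actions`, `get_critic_values`, `update_latent_gene_pool_`) in `epo.py`, and simplifies `EPO` to wrap only the agent. A minimal usage sketch, distilled from the new `test_create_agent` test shown further down (the dimensions and hyperparameters are the test's illustrative values, not recommended defaults):

import torch
from evolutionary_policy_optimization import create_agent

# build an actor, critic, and latent gene pool that share one latent dimension
agent = create_agent(
    dim_state = 512,
    num_latents = 128,
    dim_latent = 32,
    actor_num_actions = 5,
    actor_dim_hiddens = (256, 128),
    critic_dim_hiddens = (256, 128, 64)
)

state = torch.randn(1, 512)

# act and evaluate using the latent (gene) selected by latent_id
actions = agent.get_actor_actions(state, latent_id = 3)
value = agent.get_critic_values(state, latent_id = 3)

# after rolling out each latent in the environment, assign it a fitness and take one genetic algorithm step
fitness = torch.randn(128)
agent.update_latent_gene_pool_(fitness)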
{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.0.11
+Version: 0.0.14
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/evolutionary_policy_optimization/epo.py
RENAMED
@@ -502,6 +502,38 @@ class LatentGenePool(Module):
 
 # agent contains the actor, critic, and the latent genetic pool
 
+def create_agent(
+    dim_state,
+    num_latents,
+    dim_latent,
+    actor_num_actions,
+    actor_dim_hiddens: int | tuple[int, ...],
+    critic_dim_hiddens: int | tuple[int, ...],
+    num_latent_sets = 1
+) -> Agent:
+
+    actor = Actor(
+        num_actions = actor_num_actions,
+        dim_state = dim_state,
+        dim_latent = dim_latent,
+        dim_hiddens = actor_dim_hiddens
+    )
+
+    critic = Critic(
+        dim_state = dim_state,
+        dim_latent = dim_latent,
+        dim_hiddens = critic_dim_hiddens
+    )
+
+    latent_gene_pool = LatentGenePool(
+        dim_state = dim_state,
+        num_latents = num_latents,
+        dim_latent = dim_latent,
+        num_latent_sets = num_latent_sets
+    )
+
+    return Agent(actor = actor, critic = critic, latent_gene_pool = latent_gene_pool)
+
 class Agent(Module):
     def __init__(
         self,
@@ -516,6 +548,28 @@ class Agent(Module):
 
         self.latent_gene_pool = latent_gene_pool
 
+    def get_actor_actions(
+        self,
+        state,
+        latent_id
+    ):
+        latent = self.latent_gene_pool(latent_id = latent_id, state = state)
+        return self.actor(state, latent)
+
+    def get_critic_values(
+        self,
+        state,
+        latent_id
+    ):
+        latent = self.latent_gene_pool(latent_id = latent_id, state = state)
+        return self.critic(state, latent)
+
+    def update_latent_gene_pool_(
+        self,
+        fitnesses
+    ):
+        return self.latent_gene_pool.genetic_algorithm_step(fitnesses)
+
     def forward(
         self,
         memories: list[Memory]
@@ -539,13 +593,10 @@ class EPO(Module):
 
     def __init__(
         self,
-        agent: Agent
-        latent_gene_pool: LatentGenePool
+        agent: Agent
    ):
         super().__init__()
-
         self.agent = agent
-        self.latent_gene_pool = latent_gene_pool
 
     def forward(
         self,
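Note that the `EPO` trainer now receives only the agent; the latent gene pool is reached through `agent.latent_gene_pool` rather than being passed in separately. A sketch of constructing it under that assumption, importing directly from the `epo` module since this diff does not show whether `EPO` is re-exported at the package top level:

from evolutionary_policy_optimization.epo import EPO, create_agent

agent = create_agent(
    dim_state = 512,
    num_latents = 128,
    dim_latent = 32,
    actor_num_actions = 5,
    actor_dim_hiddens = (256, 128),
    critic_dim_hiddens = (256, 128, 64)
)

# as of 0.0.14, EPO no longer takes a latent_gene_pool argument of its own
epo = EPO(agent)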
{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/tests/test_epo.py
RENAMED
@@ -36,3 +36,29 @@ def test_readme(
     fitness = torch.randn(128)
 
     latent_pool.genetic_algorithm_step(fitness) # update once
+
+
+def test_create_agent():
+    from evolutionary_policy_optimization import create_agent
+
+    agent = create_agent(
+        dim_state = 512,
+        num_latents = 128,
+        dim_latent = 32,
+        actor_num_actions = 5,
+        actor_dim_hiddens = (256, 128),
+        critic_dim_hiddens = (256, 128, 64)
+    )
+
+    state = torch.randn(1, 512)
+
+    actions = agent.get_actor_actions(state, latent_id = 3)
+    value = agent.get_critic_values(state, latent_id = 3)
+
+    # interact with environment and receive rewards, termination etc
+
+    # derive a fitness score for each gene / latent
+
+    fitness = torch.randn(128)
+
+    agent.update_latent_gene_pool_(fitness) # update once
{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.github/workflows/python-publish.yml
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.github/workflows/test.yml
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/.gitignore
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/LICENSE
RENAMED
File without changes

{evolutionary_policy_optimization-0.0.11 → evolutionary_policy_optimization-0.0.14}/README.md
RENAMED
File without changes