dreamer4 0.0.94__tar.gz → 0.0.96__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dreamer4 might be problematic.
- {dreamer4-0.0.94 → dreamer4-0.0.96}/PKG-INFO +1 -1
- {dreamer4-0.0.94 → dreamer4-0.0.96}/dreamer4/dreamer4.py +24 -12
- {dreamer4-0.0.94 → dreamer4-0.0.96}/dreamer4/trainers.py +23 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/pyproject.toml +1 -1
- {dreamer4-0.0.94 → dreamer4-0.0.96}/tests/test_dreamer.py +2 -2
- {dreamer4-0.0.94 → dreamer4-0.0.96}/.github/workflows/python-publish.yml +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/.github/workflows/test.yml +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/.gitignore +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/LICENSE +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/README.md +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/dreamer4/__init__.py +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/dreamer4/mocks.py +0 -0
- {dreamer4-0.0.94 → dreamer4-0.0.96}/dreamer4-fig2.png +0 -0
dreamer4/dreamer4.py

@@ -72,20 +72,22 @@ TokenizerLosses = namedtuple('TokenizerLosses', ('recon', 'lpips'))

 WorldModelLosses = namedtuple('WorldModelLosses', ('flow', 'rewards', 'discrete_actions', 'continuous_actions'))

+MaybeTensor = Tensor | None
+
 @dataclass
 class Experience:
     latents: Tensor
-    video:
-    proprio:
-    agent_embed:
+    video: MaybeTensor = None
+    proprio: MaybeTensor = None
+    agent_embed: MaybeTensor = None
     rewards: Tensor | None = None
-    actions: tuple[
-    log_probs: tuple[
-    old_action_unembeds: tuple[
-    values:
+    actions: tuple[MaybeTensor, MaybeTensor] | None = None
+    log_probs: tuple[MaybeTensor, MaybeTensor] | None = None
+    old_action_unembeds: tuple[MaybeTensor, MaybeTensor] | None = None
+    values: MaybeTensor = None
     step_size: int | None = None
-    lens:
-    is_truncated:
+    lens: MaybeTensor = None
+    is_truncated: MaybeTensor = None
     agent_index: int = 0
     is_from_world_model: bool = True

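For orientation, here is a minimal sketch of constructing the updated Experience with its new defaults. It assumes the package is installed and that Experience is importable from dreamer4.dreamer4; the tensor shapes are illustrative only and not taken from the library.

# minimal sketch (not from the package): constructing the updated Experience,
# assuming Experience is importable from dreamer4.dreamer4; shapes are illustrative

import torch
from dreamer4.dreamer4 import Experience

latents = torch.randn(1, 16, 512)                       # only required field

exp = Experience(
    latents = latents,
    rewards = torch.zeros(1, 16),
    actions = (torch.randint(0, 4, (1, 16, 1)), None),  # (discrete, continuous) - either side may be None
)

# every other field now defaults to None via the MaybeTensor alias
assert exp.video is None and exp.proprio is None and exp.agent_embed is None
assert exp.old_action_unembeds is None and exp.values is None
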
@@ -850,9 +852,10 @@ class ActionEmbedder(Module):

     def kl_div(
         self,
-        src: tuple[
-        tgt: tuple[
-
+        src: tuple[MaybeTensor, MaybeTensor],
+        tgt: tuple[MaybeTensor, MaybeTensor],
+        reduce_across_num_actions = True
+    ) -> tuple[MaybeTensor, MaybeTensor]:

         src_discrete, src_continuous = src
         tgt_discrete, tgt_continuous = tgt
@@ -894,6 +897,15 @@ class ActionEmbedder(Module):

         continuous_kl_div = kl.kl_divergence(src_normal, tgt_normal)

+        # maybe reduce
+
+        if reduce_across_num_actions:
+            if exists(discrete_kl_div):
+                discrete_kl_div = discrete_kl_div.sum(dim = -1)
+
+            if exists(continuous_kl_div):
+                continuous_kl_div = continuous_kl_div.sum(dim = -1)
+
         return discrete_kl_div, continuous_kl_div

     def forward(
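The new reduce_across_num_actions flag collapses the per-action KL values by summing over the trailing dimension. Below is a standalone sketch of the same pattern using plain torch.distributions; the batch/time/num_actions/num_bins values are illustrative assumptions, chosen to mirror the (2, 3) shapes asserted in the updated test.

# standalone sketch of the reduction added above, using plain torch.distributions;
# the dimensions here are illustrative, not taken from the package

import torch
from torch.distributions import Categorical, kl

batch, time, num_actions, num_bins = 2, 3, 4, 11

src = Categorical(logits = torch.randn(batch, time, num_actions, num_bins))
tgt = Categorical(logits = torch.randn(batch, time, num_actions, num_bins))

discrete_kl_div = kl.kl_divergence(src, tgt)      # shape (batch, time, num_actions)

# summing across the trailing num-actions dimension yields one KL per timestep
discrete_kl_div = discrete_kl_div.sum(dim = -1)
assert discrete_kl_div.shape == (batch, time)
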
dreamer4/trainers.py

@@ -396,13 +396,20 @@ class SimTrainer(Module):
         old_values = experience.values
         rewards = experience.rewards

+        has_agent_embed = exists(experience.agent_embed)
+        agent_embed = experience.agent_embed
+
         discrete_actions, continuous_actions = experience.actions
         discrete_log_probs, continuous_log_probs = experience.log_probs

+        discrete_old_action_unembeds, continuous_old_action_unembeds = default(experience.old_action_unembeds, (None, None))
+
         # handle empties

         empty_tensor = torch.empty_like(rewards)

+        agent_embed = default(agent_embed, empty_tensor)
+
         has_discrete = exists(discrete_actions)
         has_continuous = exists(continuous_actions)

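The exists / default helpers used throughout these hunks are not shown in this diff; they presumably follow the common pattern below (an assumption, included only to make the fallback logic readable).

# assumed definitions of the exists / default helpers referenced above
# (not part of this diff - included only to clarify the fallback logic)

def exists(v):
    return v is not None

def default(v, d):
    return v if exists(v) else d
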
@@ -412,6 +419,9 @@ class SimTrainer(Module):
         discrete_log_probs = default(discrete_log_probs, empty_tensor)
         continuous_log_probs = default(continuous_log_probs, empty_tensor)

+        discrete_old_action_unembeds = default(discrete_old_action_unembeds, empty_tensor)
+        continuous_old_action_unembeds = default(discrete_old_action_unembeds, empty_tensor)
+
         # create the dataset and dataloader

         dataset = TensorDataset(
@@ -420,6 +430,9 @@ class SimTrainer(Module):
             continuous_actions,
             discrete_log_probs,
             continuous_log_probs,
+            agent_embed,
+            discrete_old_action_unembeds,
+            continuous_old_action_unembeds,
             old_values,
             rewards
         )
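TensorDataset only accepts tensors that share the same first dimension, which is why the optional fields above are swapped for empty placeholder tensors before batching. A small self-contained sketch of that constraint (names and shapes are illustrative, not from the package):

# sketch: every argument to TensorDataset must be a tensor with a matching
# first dimension, so missing fields travel through as placeholders

import torch
from torch.utils.data import DataLoader, TensorDataset

rewards = torch.randn(8, 16)
agent_embed = torch.empty_like(rewards)   # placeholder standing in for a None field

dataset = TensorDataset(rewards, agent_embed)
dataloader = DataLoader(dataset, batch_size = 4)

for rewards_batch, agent_embed_batch in dataloader:
    assert rewards_batch.shape == (4, 16)
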
@@ -434,6 +447,9 @@ class SimTrainer(Module):
             continuous_actions,
             discrete_log_probs,
             continuous_log_probs,
+            agent_embed,
+            discrete_old_action_unembeds,
+            continuous_old_action_unembeds,
             old_values,
             rewards
         ) in dataloader:
@@ -448,10 +464,17 @@ class SimTrainer(Module):
                 continuous_log_probs if has_continuous else None
             )

+            old_action_unembeds = (
+                discrete_old_action_unembeds if has_discrete else None,
+                continuous_old_action_unembeds if has_continuous else None
+            )
+
             batch_experience = Experience(
                 latents = latents,
                 actions = actions,
                 log_probs = log_probs,
+                agent_embed = agent_embed if has_agent_embed else None,
+                old_action_unembeds = old_action_unembeds,
                 values = old_values,
                 rewards = rewards,
                 step_size = step_size,
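Putting the two halves together, the round trip for an optional field is roughly: a placeholder tensor flows through the dataloader, and the has_* flag turns it back into None when the per-batch Experience is rebuilt. A sketch with illustrative values, not library code:

# sketch of the placeholder round trip for one optional field (illustrative values)

import torch

has_agent_embed = False                    # recorded before batching, as in the hunk above
rewards = torch.randn(4, 16)
agent_embed = torch.empty_like(rewards)    # placeholder that rode along in the TensorDataset

# when rebuilding the per-batch Experience, the flag restores None
agent_embed = agent_embed if has_agent_embed else None
assert agent_embed is None
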
tests/test_dreamer.py

@@ -352,8 +352,8 @@ def test_action_embedder():

     discrete_kl_div, continuous_kl_div = embedder.kl_div((discrete_logits, continuous_mean_log_var), (discrete_logits_tgt, continuous_mean_log_var_tgt))

-    assert discrete_kl_div.shape == (2, 3
-    assert continuous_kl_div.shape == (2, 3
+    assert discrete_kl_div.shape == (2, 3)
+    assert continuous_kl_div.shape == (2, 3)

     # return discrete split by number of actions

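Assuming pytest is the test runner (the repository ships a test.yml workflow, though its contents are not shown in this diff), the updated assertions can be exercised locally with something like `python -m pytest tests/test_dreamer.py -k action_embedder`.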