mani-skill-nightly 2025.5.3.1612__py3-none-any.whl → 2025.5.3.1619__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,85 @@
1
+ import torch
2
+ import gymnasium as gym
3
+ from mani_skill.envs.sapien_env import BaseEnv
4
+ from mani_skill.utils.structs.types import Array
5
+
6
+
7
class ActionRepeatWrapper(gym.Wrapper):
    def __init__(self, env: BaseEnv, repeat: int):
        """
        Environment wrapper that applies the same action for up to ``repeat``
        consecutive environment steps.

        If a sub-environment finishes (terminates or truncates) before all
        repeats have run, its returned data stops being updated, so the final
        observation/reward/info for that sub-environment come from its true
        last step.

        Args:
            env (BaseEnv): The base environment to wrap.
            repeat (int): Number of times each action is applied. ``repeat=1``
                performs one step per action (no repetition); ``repeat=2``
                steps the environment twice with the same action, and so on.
        """
        super().__init__(env)
        self.repeat = repeat

    @property
    def num_envs(self):
        # Number of parallel sub-environments managed by the underlying env.
        return self.base_env.num_envs

    @property
    def base_env(self) -> BaseEnv:
        # Innermost (unwrapped) ManiSkill environment.
        return self.env.unwrapped

    def step(self, action):
        """
        Step the wrapped environment up to ``self.repeat`` times with ``action``,
        accumulating rewards and merging per-step data only for sub-environments
        that are still running.

        Returns:
            Tuple of (obs, reward, terminations, truncations, infos) where the
            reward is summed over the repeated steps for each sub-environment.
        """
        # The first step always executes for every sub-environment.
        obs, rew, terminations, truncations, infos = super().step(action)
        obs_is_dict = isinstance(obs, dict)

        # Mask of sub-environments that are still active (neither terminated
        # nor truncated); only these receive updates on subsequent repeats.
        active = ~torch.logical_or(terminations, truncations)

        if active.any():
            for _ in range(self.repeat - 1):
                step_obs, step_rew, step_term, step_trunc, step_infos = super().step(
                    action
                )

                # Merge new observations only into still-active sub-envs.
                if obs_is_dict:
                    self._update_dict_values(
                        from_dict=step_obs, to_dict=obs, not_dones=active
                    )
                else:
                    obs[active] = step_obs[active]

                # Accumulate rewards and OR-merge done flags for active envs.
                rew[active] += step_rew[active]
                terminations[active] = torch.logical_or(terminations, step_term)[active]
                truncations[active] = torch.logical_or(truncations, step_trunc)[active]
                self._update_dict_values(
                    from_dict=step_infos, to_dict=infos, not_dones=active
                )

                # Refresh the active mask; stop early once every sub-env is done.
                active = ~torch.logical_or(terminations, truncations)
                if not active.any():
                    break

        return obs, rew, terminations, truncations, infos

    def _update_dict_values(self, from_dict: dict, to_dict: dict, not_dones: Array):
        """
        Recursively copy values from ``from_dict`` into ``to_dict``, but only
        for sub-environments whose ``not_dones`` entry is True.

        Once a sub-env is done, its mask entry is False, so later step data is
        discarded and the stored values remain those of its true final step.
        """
        for key, value in from_dict.items():
            if isinstance(value, dict):
                self._update_dict_values(
                    from_dict=value, to_dict=to_dict[key], not_dones=not_dones
                )
            # NOTE(review): assumes `Array` is a runtime-checkable class/tuple
            # (isinstance would raise on a bare typing.Union) — confirm its
            # definition in mani_skill.utils.structs.types.
            elif isinstance(value, Array):
                to_dict[key][not_dones] = value[not_dones]
            else:
                # Non-array values (scalars, strings, ...) are replaced wholesale.
                to_dict[key] = value
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mani-skill-nightly
3
- Version: 2025.5.3.1612
3
+ Version: 2025.5.3.1619
4
4
  Summary: ManiSkill3: A Unified Benchmark for Generalizable Manipulation Skills
5
5
  Home-page: https://github.com/haosulab/ManiSkill
6
6
  Author: ManiSkill contributors
@@ -800,6 +800,7 @@ mani_skill/utils/visualization/jupyter_utils.py,sha256=dXXUQz-rFTOV_Xq5yA6YE6cXg
800
800
  mani_skill/utils/visualization/misc.py,sha256=KrDCef7F5GmGOdiBQ4qFUnmUTe-7-nNBz2DVBGFD8YU,5041
801
801
  mani_skill/utils/visualization/renderer.py,sha256=-Z18-fXe5NLBYBYXFB9m2EDKdhOkAdDVWSs9vjxGCSQ,1245
802
802
  mani_skill/utils/wrappers/__init__.py,sha256=f6HDHHoM8gyNgX5RwTr2u3oGlAeHqawRvVNQiWXEJfI,229
803
+ mani_skill/utils/wrappers/action_repeat.py,sha256=RhCtzt3fYCtD-CClIOhAzdycGwVTXP_FG61yEf-QLqY,3542
803
804
  mani_skill/utils/wrappers/flatten.py,sha256=GuHJ3fCOdj9G_jm--XgG8k0p2G1eJx4LY1tesQQjnkg,4913
804
805
  mani_skill/utils/wrappers/frame_stack.py,sha256=pCp83HqXnFxbsKRYgwXreNBHnhD-yF0R2_7jdtGOTWQ,4213
805
806
  mani_skill/utils/wrappers/gymnasium.py,sha256=p0kl29kkedD2arIvGskClKhYDBAH97mZO4rTepz62jQ,4174
@@ -810,8 +811,8 @@ mani_skill/vector/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
810
811
  mani_skill/vector/wrappers/gymnasium.py,sha256=v1MDPIrVACBKCulrpdXBK2jDZQI7LKYFZgGgaCC5avY,7408
811
812
  mani_skill/vector/wrappers/sb3.py,sha256=SlXdiEPqcNHYMhJCzA29kBU6zK7DKTe1nc0L6Z3QQtY,4722
812
813
  mani_skill/viewer/__init__.py,sha256=srvDBsk4LQU75K2VIttrhiQ68p_ro7PSDqQRls2PY5c,1722
813
- mani_skill_nightly-2025.5.3.1612.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
814
- mani_skill_nightly-2025.5.3.1612.dist-info/METADATA,sha256=4nixw1uljNAUYQ0gC6Th21pC-pStemPbqA4SlftUBdU,9291
815
- mani_skill_nightly-2025.5.3.1612.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
816
- mani_skill_nightly-2025.5.3.1612.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
817
- mani_skill_nightly-2025.5.3.1612.dist-info/RECORD,,
814
+ mani_skill_nightly-2025.5.3.1619.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
815
+ mani_skill_nightly-2025.5.3.1619.dist-info/METADATA,sha256=_GtuLPVaaiZsUUCPLl9Twf3rCk5MAQnQkGuxVJ5hjsY,9291
816
+ mani_skill_nightly-2025.5.3.1619.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
817
+ mani_skill_nightly-2025.5.3.1619.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
818
+ mani_skill_nightly-2025.5.3.1619.dist-info/RECORD,,