mani-skill-nightly 2025.7.25.329__py3-none-any.whl → 2025.7.27.104__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mani-skill-nightly has been flagged as possibly problematic by the registry scanner.

Files changed (30)
  1. mani_skill/agents/__init__.py +3 -1
  2. mani_skill/envs/sapien_env.py +33 -16
  3. mani_skill/envs/tasks/drawing/__init__.py +3 -3
  4. mani_skill/envs/tasks/humanoid/__init__.py +1 -1
  5. mani_skill/trajectory/utils/__init__.py +0 -2
  6. mani_skill/utils/building/actors/__init__.py +11 -35
  7. mani_skill/utils/building/actors/common.py +30 -0
  8. mani_skill/utils/download_demo.py +0 -2
  9. mani_skill/utils/sapien_utils.py +44 -4
  10. mani_skill/utils/scene_builder/control/__init__.py +0 -0
  11. mani_skill/utils/scene_builder/control/planar/__init__.py +0 -0
  12. mani_skill/utils/scene_builder/robocasa/__init__.py +0 -0
  13. mani_skill/utils/structs/articulation.py +4 -2
  14. mani_skill/utils/tree.py +24 -0
  15. mani_skill/utils/wrappers/__init__.py +2 -1
  16. mani_skill/utils/wrappers/cached_reset.py +151 -0
  17. mani_skill/utils/wrappers/record.py +3 -4
  18. mani_skill/vector/wrappers/gymnasium.py +3 -4
  19. {mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/METADATA +2 -1
  20. {mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/RECORD +24 -25
  21. mani_skill/evaluation/__init__.py +0 -1
  22. mani_skill/evaluation/evaluator.py +0 -129
  23. mani_skill/evaluation/run_evaluation.py +0 -147
  24. mani_skill/evaluation/solution.py +0 -42
  25. mani_skill/utils/precompile_mpm.py +0 -13
  26. mani_skill/viewer/__init__.py +0 -46
  27. /mani_skill/{utils/scene_builder/robocasa/objects → envs/utils/system}/__init__.py +0 -0
  28. {mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/LICENSE +0 -0
  29. {mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/WHEEL +0 -0
  30. {mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/top_level.txt +0 -0
mani_skill/agents/__init__.py
@@ -1 +1,3 @@
- from .registration import REGISTERED_AGENTS
+ from .base_agent import BaseAgent
+ from .multi_agent import MultiAgent
+ from .registration import REGISTERED_AGENTS, register_agent
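With BaseAgent, MultiAgent, and register_agent now re-exported at the package level, a custom robot can import everything from one place. A hedged sketch (the class body and uid below are illustrative, not part of this release):

    from mani_skill.agents import BaseAgent, register_agent

    @register_agent()
    class MyRobot(BaseAgent):
        uid = "my_robot"  # unique ID used to look the agent up in REGISTERED_AGENTS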
mani_skill/envs/sapien_env.py
@@ -16,9 +16,7 @@ from gymnasium.vector.utils import batch_space
 
  import mani_skill.render.utils as render_utils
  from mani_skill import logger
- from mani_skill.agents import REGISTERED_AGENTS
- from mani_skill.agents.base_agent import BaseAgent
- from mani_skill.agents.multi_agent import MultiAgent
+ from mani_skill.agents import REGISTERED_AGENTS, BaseAgent, MultiAgent
  from mani_skill.envs.scene import ManiSkillScene
  from mani_skill.envs.utils.observations import (
      parse_obs_mode_to_struct,
@@ -37,12 +35,11 @@ from mani_skill.sensors.camera import (
      update_camera_configs_from_dict,
  )
  from mani_skill.sensors.depth_camera import StereoDepthCamera, StereoDepthCameraConfig
- from mani_skill.utils import common, gym_utils, sapien_utils
+ from mani_skill.utils import common, gym_utils, sapien_utils, tree
  from mani_skill.utils.structs import Actor, Articulation
  from mani_skill.utils.structs.pose import Pose
  from mani_skill.utils.structs.types import Array, SimConfig
  from mani_skill.utils.visualization.misc import tile_images
- from mani_skill.viewer import create_viewer
 
 
  class BaseEnv(gym.Env):
@@ -316,6 +313,8 @@ class BaseEnv(gym.Env):
      self._elapsed_steps = (
          torch.zeros(self.num_envs, device=self.device, dtype=torch.int32)
      )
+     self._last_obs = None
+     """the last observation returned by the environment"""
      obs, _ = self.reset(seed=[2022 + i for i in range(self.num_envs)], options=dict(reconfigure=True))
 
      self._init_raw_obs = common.to_cpu_tensor(obs)
@@ -739,7 +738,7 @@ class BaseEnv(gym.Env):
      # for GPU sim, we have to setup sensors after we call setup gpu in order to enable loading mounted sensors as they depend on GPU buffer data
      if self.scene.can_render(): self._setup_sensors(options)
      if self.render_mode == "human" and self._viewer is None:
-         self._viewer = create_viewer(self._viewer_camera_config)
+         self._viewer = sapien_utils.create_viewer(self._viewer_camera_config)
      if self._viewer is not None:
          self._setup_viewer()
      self._reconfig_counter = self.reconfiguration_freq
@@ -850,7 +849,11 @@ class BaseEnv(gym.Env):
      options["reconfigure"] is True, will call self._reconfigure() which deletes the entire physx scene and reconstructs everything.
      Users building custom tasks generally do not need to override this function.
 
-     Returns the first observation and a info dictionary. The info dictionary is of type
+     If options["reset_to_env_states"] is given, we expect there to be options["reset_to_env_states"]["env_states"] and optionally options["reset_to_env_states"]["obs"], both with
+     batch size equal to the number of environments being reset. "env_states" can be a dictionary or flat tensor and we skip calling the environment's _initialize_episode function which
+     generates the initial state on a normal reset. If "obs" is given we skip calling the environment's get_obs function which can save some compute/time.
+
+     Returns the observations and an info dictionary. The info dictionary is of type
 
 
      .. highlight:: python
@@ -917,12 +920,22 @@ class BaseEnv(gym.Env):
      if self.agent is not None:
          self.agent.reset()
 
-     if seed is not None or self._enhanced_determinism:
-         with torch.random.fork_rng():
-             torch.manual_seed(self._episode_seed[0])
-             self._initialize_episode(env_idx, options)
+     # we either reset to given env states or use the environment's defined _initialize_episode function to generate the initial state
+     reset_to_env_states_obs = None
+     if "reset_to_env_states" in options:
+         env_states = options["reset_to_env_states"]["env_states"]
+         reset_to_env_states_obs = options["reset_to_env_states"].get("obs", None)
+         if isinstance(env_states, dict):
+             self.set_state_dict(env_states, env_idx)
+         else:
+             self.set_state(env_states, env_idx)
      else:
-         self._initialize_episode(env_idx, options)
+         if seed is not None or self._enhanced_determinism:
+             with torch.random.fork_rng():
+                 torch.manual_seed(self._episode_seed[0])
+                 self._initialize_episode(env_idx, options)
+         else:
+             self._initialize_episode(env_idx, options)
      # reset the reset mask back to all ones so any internal code in maniskill can continue to manipulate all scenes at once as usual
      self.scene._reset_mask = torch.ones(
          self.num_envs, dtype=bool, device=self.device
@@ -942,9 +955,13 @@ class BaseEnv(gym.Env):
          self.agent.controller.reset()
 
      info = self.get_info()
-     obs = self.get_obs(info)
-
+     if reset_to_env_states_obs is None:
+         obs = self.get_obs(info)
+     else:
+         obs = self._last_obs
+         tree.replace(obs, env_idx, common.to_tensor(reset_to_env_states_obs, device=self.device))
      info["reconfigure"] = reconfigure
+     self._last_obs = obs
      return obs, info
 
  def _set_main_rng(self, seed):
@@ -1031,7 +1048,7 @@ class BaseEnv(gym.Env):
          terminated = info["fail"].clone()
      else:
          terminated = torch.zeros(self.num_envs, dtype=bool, device=self.device)
-
+     self._last_obs = obs
      return (
          obs,
          reward,
@@ -1324,7 +1341,7 @@ class BaseEnv(gym.Env):
      for obj in self._hidden_objects:
          obj.show_visual()
      if self._viewer is None:
-         self._viewer = create_viewer(self._viewer_camera_config)
+         self._viewer = sapien_utils.create_viewer(self._viewer_camera_config)
      self._setup_viewer()
      if self.gpu_sim_enabled and self.scene._gpu_sim_initialized:
          self.scene.px.sync_poses_gpu_to_cpu()
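The new reset_to_env_states option documented above lets a caller bypass _initialize_episode (and optionally get_obs). A minimal sketch of a caller using it, assuming env is a ManiSkill BaseEnv and saved_states / saved_obs were captured earlier (e.g. from get_state_dict() and a prior reset), each batched to the number of environments being reset:

    obs, info = env.reset(
        options=dict(
            reset_to_env_states=dict(
                env_states=saved_states,  # dict (routed to set_state_dict) or flat tensor (routed to set_state)
                obs=saved_obs,            # optional: skips get_obs() and patches self._last_obs instead
            )
        )
    )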
mani_skill/envs/tasks/drawing/__init__.py
@@ -1,3 +1,3 @@
- from .draw import *
- from .draw_triangle import *
- from .draw_svg import *
+ from .draw import TableTopFreeDrawEnv
+ from .draw_svg import DrawSVGEnv
+ from .draw_triangle import DrawTriangleEnv
mani_skill/envs/tasks/humanoid/__init__.py
@@ -1,3 +1,3 @@
- from .humanoid_pick_place import *
+ from .humanoid_pick_place import HumanoidPlaceAppleInBowl
  from .humanoid_stand import HumanoidStandEnv
  from .transport_box import TransportBoxEnv
mani_skill/trajectory/utils/__init__.py
@@ -6,8 +6,6 @@ Utils for working with ManiSkill trajectory files
  import h5py
  import numpy as np
 
- from mani_skill.utils.structs.types import Array
-
 
  def _get_dict_len(x):
      if isinstance(x, dict) or isinstance(x, h5py.Group):
mani_skill/utils/building/actors/__init__.py
@@ -1,35 +1,11 @@
- from mani_skill.envs.scene import ManiSkillScene
- from mani_skill.utils.building.actor_builder import ActorBuilder
-
- from .common import *
-
-
- def get_actor_builder(
-     scene: ManiSkillScene, id: str, add_collision: bool = True, add_visual: bool = True
- ) -> ActorBuilder:
-     """Builds an actor or returns an actor builder given an ID specifying which dataset/source and then the actor ID
-
-     Currently these IDs are hardcoded for a few datasets. The new Shapedex platform for hosting and managing all assets will be
-     integrated in the future
-
-     Args:
-         scene: The ManiSkillScene. If building a custom task this is generally just self.scene
-         id (str): The unique ID identifying the dataset and the ID of the actor in that dataset to build. The format should be
-             "<dataset_id>:<actor_id_in_dataset>"
-         add_collision (bool): Whether to include the collision shapes/meshes
-         add_visual (bool): Whether to include visual shapes/meshes
-     """
-     splits = id.split(":")
-     dataset_source = splits[0]
-     actor_id = ":".join(splits[1:])
-
-     if dataset_source == "ycb":
-         from mani_skill.utils.building.actors.ycb import get_ycb_builder
-
-         builder = get_ycb_builder(
-             scene=scene, id=actor_id, add_collision=add_collision, add_visual=add_visual
-         )
-     else:
-         raise RuntimeError(f"No dataset with id {dataset_source} was found")
-
-     return builder
+ from .common import (
+     build_box,
+     build_colorful_cube,
+     build_cube,
+     build_cylinder,
+     build_fourcolor_peg,
+     build_red_white_target,
+     build_sphere,
+     build_twocolor_peg,
+     get_actor_builder,
+ )
mani_skill/utils/building/actors/common.py
@@ -14,6 +14,36 @@ from mani_skill.utils.structs.pose import Pose
  from mani_skill.utils.structs.types import Array
 
 
+ def get_actor_builder(
+     scene: ManiSkillScene, id: str, add_collision: bool = True, add_visual: bool = True
+ ) -> ActorBuilder:
+     """Returns an :py:class:`~mani_skill.utils.building.actor_builder.ActorBuilder` given an ID specifying which dataset/source and then the ID of the asset.
+
+     Currently these IDs are hardcoded for a few datasets. We may add more actor datasets in the future for easy loading by users
+
+     Args:
+         scene: The ManiSkillScene. If building a custom task this is generally just self.scene
+         id (str): The unique ID identifying the dataset and the ID of the actor in that dataset to build. The format should be
+             "<dataset_id>:<actor_id_in_dataset>"
+         add_collision (bool): Whether to include the collision shapes/meshes
+         add_visual (bool): Whether to include visual shapes/meshes
+     """
+     splits = id.split(":")
+     dataset_source = splits[0]
+     actor_id = ":".join(splits[1:])
+
+     if dataset_source == "ycb":
+         from mani_skill.utils.building.actors.ycb import get_ycb_builder
+
+         builder = get_ycb_builder(
+             scene=scene, id=actor_id, add_collision=add_collision, add_visual=add_visual
+         )
+     else:
+         raise RuntimeError(f"No dataset with id {dataset_source} was found")
+
+     return builder
+
+
  def _build_by_type(
      builder: ActorBuilder,
      name,
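A minimal usage sketch for get_actor_builder in its new home, using the "<dataset_id>:<actor_id_in_dataset>" format documented above; this assumes the YCB assets are downloaded and a custom task's _load_scene, and the actor ID "011_banana" is illustrative:

    import sapien
    from mani_skill.utils.building.actors.common import get_actor_builder

    builder = get_actor_builder(self.scene, id="ycb:011_banana")  # "ycb" is the only hardcoded dataset here
    builder.initial_pose = sapien.Pose(p=[0, 0, 0.05])
    banana = builder.build(name="banana")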
mani_skill/utils/download_demo.py
@@ -46,8 +46,6 @@ for env_id in [
      raw_dataset_url=f"https://huggingface.co/datasets/haosulab/ManiSkill_Demonstrations/resolve/main/demos/{env_id}.zip?download=true"
  )
 
- pbar = None
-
 
  def tqdmhook(t):
      last_b = [0]
mani_skill/utils/sapien_utils.py
@@ -3,6 +3,7 @@ Utilities that work with the simulation / SAPIEN
  """
  from __future__ import annotations
 
+ import sys
  from typing import TYPE_CHECKING, Dict, List, Tuple, TypeVar
 
  import numpy as np
@@ -10,15 +11,16 @@ import sapien
  import sapien.physx as physx
  import sapien.render
  import sapien.wrapper.urdf_loader
+ import torch
+ from sapien.utils import Viewer
 
+ from mani_skill.render import SAPIEN_RENDER_SYSTEM
  from mani_skill.utils.geometry.rotation_conversions import matrix_to_quaternion
  from mani_skill.utils.structs.pose import Pose
 
  if TYPE_CHECKING:
      from mani_skill.utils.structs.actor import Actor
-     from mani_skill.envs.scene import ManiSkillScene
-
- import torch
+     from mani_skill.sensors.camera import CameraConfig
 
  from mani_skill.utils.structs.types import Array, Device
@@ -115,7 +117,6 @@ def parse_urdf_config(config_dict: dict) -> Dict:
 
      Args:
          config_dict (dict): a dict containing link physical properties.
-         scene (ManiSkillScene): the simulation scene
 
      Returns:
          Dict: urdf config passed to `sapien.URDFLoader.load`.
@@ -436,3 +437,42 @@ def is_state_dict_consistent(state_dict: dict):
          if v.shape[0] != batch_size:
              return False
      return True
+
+
+ def create_viewer(viewer_camera_config: CameraConfig):
+     """Creates a viewer with the given camera config"""
+     if SAPIEN_RENDER_SYSTEM == "3.0":
+         sapien.render.set_viewer_shader_dir(
+             viewer_camera_config.shader_config.shader_pack
+         )
+         if viewer_camera_config.shader_config.shader_pack[:2] == "rt":
+             sapien.render.set_ray_tracing_denoiser(
+                 viewer_camera_config.shader_config.shader_pack_config[
+                     "ray_tracing_denoiser"
+                 ]
+             )
+             sapien.render.set_ray_tracing_path_depth(
+                 viewer_camera_config.shader_config.shader_pack_config[
+                     "ray_tracing_path_depth"
+                 ]
+             )
+             sapien.render.set_ray_tracing_samples_per_pixel(
+                 viewer_camera_config.shader_config.shader_pack_config[
+                     "ray_tracing_samples_per_pixel"
+                 ]
+             )
+         viewer = Viewer(
+             resolutions=(viewer_camera_config.width, viewer_camera_config.height)
+         )
+         if sys.platform == "darwin":  # macOS
+             viewer.window.set_content_scale(1)
+     elif SAPIEN_RENDER_SYSTEM == "3.1":
+         # TODO (stao): figure out how shader pack configs can be set at run time
+         viewer = Viewer(
+             resolutions=(viewer_camera_config.width, viewer_camera_config.height),
+             shader_pack=sapien.render.get_shader_pack(
+                 viewer_camera_config.shader_config.shader_pack
+             ),
+         )
+
+     return viewer
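create_viewer moved from the now-removed mani_skill.viewer module into sapien_utils. A hedged sketch of calling it directly, reusing the internal _viewer_camera_config that BaseEnv already builds for its viewer (seen in the diff above); the set_scene follow-up is hypothetical and shown only to note a scene must be attached before rendering:

    from mani_skill.utils import sapien_utils

    viewer = sapien_utils.create_viewer(env.unwrapped._viewer_camera_config)
    viewer.set_scene(env.unwrapped.scene.sub_scenes[0])  # hypothetical: attach one sub-scene to view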
mani_skill/utils/scene_builder/control/__init__.py (new empty file, no diff)
mani_skill/utils/scene_builder/control/planar/__init__.py (new empty file, no diff)
mani_skill/utils/scene_builder/robocasa/__init__.py (new empty file, no diff)
mani_skill/utils/structs/articulation.py
@@ -888,7 +888,7 @@ class Articulation(BaseStruct[physx.PhysxArticulation]):
          else:
              gx, gy = self.get_joint_target_indices(joint_indices)
          self.px.cuda_articulation_target_qpos.torch()[
-             gx[self.scene._reset_mask], gy[self.scene._reset_mask]
+             gx[self.scene._reset_mask[self._scene_idxs]], gy[self.scene._reset_mask[self._scene_idxs]]
          ] = targets
      else:
          for i, joint in enumerate(joints):
@@ -911,7 +911,9 @@ class Articulation(BaseStruct[physx.PhysxArticulation]):
              gx, gy = self.get_joint_target_indices(joints)
          else:
              gx, gy = self.get_joint_target_indices(joint_indices)
-         self.px.cuda_articulation_target_qvel.torch()[gx, gy] = targets
+         self.px.cuda_articulation_target_qvel.torch()[
+             gx[self.scene._reset_mask[self._scene_idxs]], gy[self.scene._reset_mask[self._scene_idxs]]
+         ] = targets
      else:
          for i, joint in enumerate(joints):
              joint.set_drive_velocity_target(targets[0, i])
mani_skill/utils/tree.py
@@ -0,0 +1,24 @@
+ import torch
+
+
+ # NOTE (stao): when tensordict is used we should replace all of this
+ def slice(x, i):
+     if isinstance(x, dict):
+         return {k: slice(v, i) for k, v in x.items()}
+     else:
+         return x[i]
+
+
+ def cat(x: list):
+     if isinstance(x[0], dict):
+         return {k: cat([d[k] for d in x]) for k in x[0].keys()}
+     else:
+         return torch.cat(x, dim=0)
+
+
+ def replace(x, i, y):
+     if isinstance(x, dict):
+         for k, v in x.items():
+             replace(v, i, y[k])
+     else:
+         x[i] = y
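These helpers treat nested dicts of tensors as trees and apply indexing, concatenation, or in-place writes at the leaves. A minimal sketch of how they compose (batch shapes are illustrative):

    import torch
    from mani_skill.utils import tree

    batch = {"agent": {"qpos": torch.zeros(4, 7)}, "extra": torch.zeros(4)}
    half = tree.slice(batch, slice(0, 2))    # every leaf sliced to batch size 2
    doubled = tree.cat([batch, batch])       # every leaf concatenated to batch size 8
    tree.replace(batch, torch.tensor([0]), tree.slice(doubled, slice(0, 1)))  # in-place write at index 0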
mani_skill/utils/wrappers/__init__.py
@@ -1,3 +1,5 @@
+ from .action_repeat import ActionRepeatWrapper
+ from .cached_reset import CachedResetWrapper
  from .flatten import (
      FlattenActionSpaceWrapper,
      FlattenObservationWrapper,
@@ -6,4 +8,3 @@ from .flatten import (
  from .frame_stack import FrameStack
  from .gymnasium import CPUGymWrapper
  from .record import RecordEpisode
- from .action_repeat import ActionRepeatWrapper
mani_skill/utils/wrappers/cached_reset.py
@@ -0,0 +1,151 @@
+ from dataclasses import asdict, dataclass
+ from typing import List, Optional, Union
+
+ import dacite
+ import gymnasium as gym
+ import torch
+
+ from mani_skill.envs.sapien_env import BaseEnv
+ from mani_skill.utils import common, tree
+ from mani_skill.utils.structs.types import Device
+
+
+ @dataclass
+ class CachedResetsConfig:
+     num_resets: Optional[int] = None
+     """The number of reset states to cache. If none it will cache `num_envs` number of reset states."""
+     device: Optional[Device] = None
+     """The device to cache the reset states on. If none it will use the base environment's device."""
+     seed: Optional[int] = None
+     """The seed to use for generating the cached reset states."""
+
+     def dict(self):
+         return {k: v for k, v in asdict(self).items()}
+
+
+ class CachedResetWrapper(gym.Wrapper):
+     """
+     Cached reset wrapper for ManiSkill3 environments. Caching resets allows you to skip slower parts of the reset function call and boost environment FPS as a result.
+
+     Args:
+         env: The environment to wrap.
+         reset_to_env_states: A dictionary with keys "env_states" and optionally "obs". "env_states" is a dictionary of environment states to reset to.
+             "obs" contains the corresponding observations generated at those env states. If reset_to_env_states is not provided, the wrapper will sample reset states
+             from the environment using the given seed.
+         config: A dictionary or a `CachedResetsConfig` object that contains the configuration for the cached resets.
+     """
+
+     def __init__(
+         self,
+         env: gym.Env,
+         reset_to_env_states: Optional[dict] = None,
+         config: Union[CachedResetsConfig, dict] = CachedResetsConfig(),
+     ):
+         super().__init__(env)
+         self.num_envs = self.base_env.num_envs
+         if isinstance(config, CachedResetsConfig):
+             config = config.dict()
+         self.cached_resets_config = dacite.from_dict(
+             data_class=CachedResetsConfig,
+             data=config,
+             config=dacite.Config(strict=True),
+         )
+         cached_data_device = self.cached_resets_config.device
+         if cached_data_device is None:
+             cached_data_device = self.base_env.device
+         self._num_cached_resets = 0
+         if reset_to_env_states is not None:
+             self._cached_resets_env_states = reset_to_env_states["env_states"]
+             self._cached_resets_obs_buffer = reset_to_env_states.get("obs", None)
+             self._num_cached_resets = len(self._cached_resets_env_states)
+         else:
+             if self.cached_resets_config.num_resets is None:
+                 self.cached_resets_config.num_resets = 16384
+             self._cached_resets_env_states = []
+             self._cached_resets_obs_buffer = []
+             while self._num_cached_resets < self.cached_resets_config.num_resets:
+                 obs, _ = self.env.reset(
+                     seed=self.cached_resets_config.seed,
+                     options=dict(
+                         env_idx=torch.arange(
+                             0,
+                             min(
+                                 self.cached_resets_config.num_resets
+                                 - self._num_cached_resets,
+                                 self.num_envs,
+                             ),
+                             device=self.base_env.device,
+                         )
+                     ),
+                 )
+                 state = self.env.get_wrapper_attr("get_state_dict")()
+                 if (
+                     self.cached_resets_config.num_resets - self._num_cached_resets
+                     < self.num_envs
+                 ):
+                     obs = tree.slice(
+                         obs,
+                         slice(
+                             0,
+                             self.cached_resets_config.num_resets
+                             - self._num_cached_resets,
+                         ),
+                     )
+                     state = tree.slice(
+                         state,
+                         slice(
+                             0,
+                             self.cached_resets_config.num_resets
+                             - self._num_cached_resets,
+                         ),
+                     )
+                 self._cached_resets_obs_buffer.append(
+                     common.to_tensor(obs, device=self.cached_resets_config.device)
+                 )
+                 self._cached_resets_env_states.append(
+                     common.to_tensor(state, device=self.cached_resets_config.device)
+                 )
+                 self._num_cached_resets += self.num_envs
+             self._cached_resets_env_states = tree.cat(self._cached_resets_env_states)
+             self._cached_resets_obs_buffer = tree.cat(self._cached_resets_obs_buffer)
+
+         self._cached_resets_env_states = common.to_tensor(
+             self._cached_resets_env_states, device=cached_data_device
+         )
+         if self._cached_resets_obs_buffer is not None:
+             self._cached_resets_obs_buffer = common.to_tensor(
+                 self._cached_resets_obs_buffer, device=cached_data_device
+             )
+
+     @property
+     def base_env(self) -> BaseEnv:
+         return self.env.unwrapped
+
+     def reset(
+         self,
+         *args,
+         seed: Optional[Union[int, List[int]]] = None,
+         options: Optional[dict] = None,
+         **kwargs
+     ):
+         env_idx = None
+         if options is None:
+             options = dict()
+         if "env_idx" in options:
+             env_idx = options["env_idx"]
+         if self._cached_resets_env_states is not None:
+             sampled_ids = torch.randint(
+                 0,
+                 self._num_cached_resets,
+                 size=(len(env_idx) if env_idx is not None else self.num_envs,),
+                 device=self.base_env.device,
+             )
+             options["reset_to_env_states"] = dict(
+                 env_states=tree.slice(self._cached_resets_env_states, sampled_ids),
+             )
+             if self._cached_resets_obs_buffer is not None:
+                 options["reset_to_env_states"]["obs"] = tree.slice(
+                     self._cached_resets_obs_buffer, sampled_ids
+                 )
+         obs, info = self.env.reset(seed=seed, options=options)
+         return obs, info
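A minimal usage sketch for the new wrapper, assuming a GPU-vectorized ManiSkill environment (the env ID and sizes are illustrative):

    import gymnasium as gym
    import mani_skill.envs  # registers ManiSkill environments
    from mani_skill.utils.wrappers import CachedResetWrapper

    env = gym.make("PickCube-v1", num_envs=128)
    env = CachedResetWrapper(env, config=dict(num_resets=1024, seed=0))
    obs, info = env.reset()  # subsequent resets sample from the 1024 cached states

Because the wrapper routes cached states through options["reset_to_env_states"], resets skip _initialize_episode and, when cached observations are available, get_obs as well.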
mani_skill/utils/wrappers/record.py
@@ -357,16 +357,15 @@ class RecordEpisode(gym.Wrapper):
      self,
      *args,
      seed: Optional[Union[int, List[int]]] = None,
-     options: Optional[dict] = dict(),
+     options: Optional[dict] = None,
      **kwargs,
  ):
-
      if self.save_on_reset:
          if self.save_video and self.num_envs == 1:
              self.flush_video()
          # if doing a full reset then we flush all trajectories including incompleted ones
          if self._trajectory_buffer is not None:
-             if "env_idx" not in options:
+             if options is None or "env_idx" not in options:
                  self.flush_trajectory(env_idxs_to_flush=np.arange(self.num_envs))
              else:
                  self.flush_trajectory(
@@ -415,7 +414,7 @@ class RecordEpisode(gym.Wrapper):
      if self.record_env_state:
          first_step.state = common.to_numpy(common.batch(state_dict))
      env_idx = np.arange(self.num_envs)
-     if "env_idx" in options:
+     if options is not None and "env_idx" in options:
          env_idx = common.to_numpy(options["env_idx"])
      if self._trajectory_buffer is None:
          # Initialize trajectory buffer on the first episode based on given observation (which should be generated after all wrappers)
mani_skill/vector/wrappers/gymnasium.py
@@ -11,6 +11,7 @@ from mani_skill.utils.structs.types import Array
 
  if TYPE_CHECKING:
      from gymnasium import Env
+
      from mani_skill.envs.sapien_env import BaseEnv
 
 
@@ -18,8 +19,6 @@ class ManiSkillVectorEnv(VectorEnv):
  """
  Gymnasium Vector Env implementation for ManiSkill environments running on the GPU for parallel simulation and optionally parallel rendering
 
- Note that currently this also assumes modeling tasks as infinite horizon (e.g. terminations is always False, only reset when timelimit is reached)
-
  Args:
      env: The environment created via gym.make / after wrappers are applied. If a string is given, we use gym.make(env) to create an environment
      num_envs: The number of parallel environments. This is only used if the env argument is a string
@@ -89,10 +88,10 @@ class ManiSkillVectorEnv(VectorEnv):
      self,
      *,
      seed: Optional[Union[int, List[int]]] = None,
-     options: Optional[dict] = dict(),
+     options: Optional[dict] = None,
  ):
      obs, info = self._env.reset(seed=seed, options=options)
-     if "env_idx" in options:
+     if options is not None and "env_idx" in options:
          env_idx = options["env_idx"]
          mask = torch.zeros(self.num_envs, dtype=bool, device=self.base_env.device)
          mask[env_idx] = True
{mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mani-skill-nightly
- Version: 2025.7.25.329
+ Version: 2025.7.27.104
  Summary: ManiSkill3: A Unified Benchmark for Generalizable Manipulation Skills
  Home-page: https://github.com/haosulab/ManiSkill
  Author: ManiSkill contributors
@@ -52,6 +52,7 @@ Requires-Dist: sphinx-subfigure; extra == "docs"
  Requires-Dist: sphinxcontrib-video; extra == "docs"
  Requires-Dist: sphinx-togglebutton; extra == "docs"
  Requires-Dist: sphinx-design; extra == "docs"
+ Requires-Dist: sphinx-autoapi; extra == "docs"
 
  # ManiSkill 3 (Beta)
{mani_skill_nightly-2025.7.25.329.dist-info → mani_skill_nightly-2025.7.27.104.dist-info}/RECORD
@@ -1,5 +1,5 @@
  mani_skill/__init__.py,sha256=_wZjMWSlWZYeAAEjiHAHa5W6uq7Eh4fbny8HwlYSJhQ,2101
- mani_skill/agents/__init__.py,sha256=6u7nUfWDqWmD_ieNhECfhV6mIyf4SmMdumexE2lRlsU,44
+ mani_skill/agents/__init__.py,sha256=87Xa-eaPbbVm6LmD1111yXSBuRkcoTG0g_5RdkaFiGI,130
  mani_skill/agents/base_agent.py,sha256=pnaE1VWJSpdzlsFPmcG5VVLLX9sWtPnl1ZM3ZIy2kXM,18400
  mani_skill/agents/base_real_agent.py,sha256=DD9SXZa7er5zF7wdm97IO_0W4xbdw-66iLC8j86ykYw,8549
  mani_skill/agents/multi_agent.py,sha256=AFygr2797M5Hhk4qMoLuFmFk7msqnp82bFuSyP1j8JA,3341
@@ -539,7 +539,7 @@ mani_skill/assets/robots/xarm7/meshes/visual/link7.glb,sha256=aZatACOv20VJbi2tOE
  mani_skill/assets/robots/xarm7/meshes/visual/link_base.glb,sha256=vcy2lN1V72jIsSDRT0ZKVskR_0pVOXtDvBkxO2GENWs,467668
  mani_skill/envs/__init__.py,sha256=YPlttBErTcf9vSnkZ54EQ8vTABSfFFrBdUY0AkF4vmg,43
  mani_skill/envs/minimal_template.py,sha256=9THHWA1vkHatptc9g5Ojh-UBUKWQmLHVeq4fcaqv2aY,2200
- mani_skill/envs/sapien_env.py,sha256=VFP2hMAfaocOnOKl0xpuy95dJV_LhJBTqcNceBB2z-0,73858
+ mani_skill/envs/sapien_env.py,sha256=jjA3wjqjTeB7C4tUKZEE5vSAVf1qPcYw5ARD3ItybfA,75226
  mani_skill/envs/scene.py,sha256=4ZAIJs61fwPPhfDvc3v845sj_Ftsd1sSYaju10KnXbQ,48465
  mani_skill/envs/sim2real_env.py,sha256=3mkQX4TonE2pUC5_Atmx0IYDH2_v6GSwOPJvQMEvCNY,19214
  mani_skill/envs/template.py,sha256=0wnwKjnGOF7RvTR5Gz4VopaUiFxnIioXwmb4nPVxAs8,11939
@@ -570,7 +570,7 @@ mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/base_env.py,sha256=L7eQ8
  mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/put_on_in_scene.py,sha256=kDKEX-e1Hq5kZiBs9xfEzU2jjx35eHWriAAYFJcBbgE,9419
  mani_skill/envs/tasks/digital_twins/so100_arm/__init__.py,sha256=uehHSCaHoZDB9awMtF81r4A-X4fM6NFmErzOsgzgSs4,42
  mani_skill/envs/tasks/digital_twins/so100_arm/grasp_cube.py,sha256=Uv2wTMUyfh8ygG4g6WfPj5E8jaDUyBZ789lPPCRBJjM,22386
- mani_skill/envs/tasks/drawing/__init__.py,sha256=eZ7CnzIWBOHUihvOKStSQK94eKSv5wchRdSodiDYBlw,72
+ mani_skill/envs/tasks/drawing/__init__.py,sha256=b2HaUu5UwcWHetjcn6hKlKFokUMd1ZGIljOg-kPeo54,114
  mani_skill/envs/tasks/drawing/draw.py,sha256=WlhxJjt0-DlkxC3t-o0M8BoOwdwWpM9reupFg5OqiZc,8146
  mani_skill/envs/tasks/drawing/draw_svg.py,sha256=UKnVl_tMfUNHmOg24Ny-wFynb5CaZC0uIHvW9sBxbyo,16206
  mani_skill/envs/tasks/drawing/draw_triangle.py,sha256=aOSc37tHZrU_96W_Pj0mNpxOTopHyMvCVps-gihE0qc,15528
@@ -585,7 +585,7 @@ mani_skill/envs/tasks/fmb/assets/purple_u.glb,sha256=lQZAv2qipX0QpYLyQuVWNKUFs8n
  mani_skill/envs/tasks/fmb/assets/purple_u.ply,sha256=KwY2eUKF5Ci5cQ9aYUVAulO3RyRZ5AxhZZcbsaZoXTE,807
  mani_skill/envs/tasks/fmb/assets/reorienting_fixture.glb,sha256=n8_3KXNqa--hmCLhaK2d5kwM0ujTnt0hD5HLVC1_0xs,8764
  mani_skill/envs/tasks/fmb/assets/yellow_peg.glb,sha256=QgMb8lGcpQh_-BLweRhYZ5gI3V4LjueZvZm3yVBTacE,1700
- mani_skill/envs/tasks/humanoid/__init__.py,sha256=sU7DtnY15uZLrthoGFo3KOFvISjkIhobzByI7szdv9I,123
+ mani_skill/envs/tasks/humanoid/__init__.py,sha256=18pnVy_csZjYmV2IbvMTGg3UjiTxI9anm1bDAnM2hmo,146
  mani_skill/envs/tasks/humanoid/humanoid_pick_place.py,sha256=k8OyBB2wnXuFHrBgp-YMIb5TDtBHIgZHkimRckMv-eo,10692
  mani_skill/envs/tasks/humanoid/humanoid_stand.py,sha256=azpTPczHIiyECYH672qzp2RtcAFCsuUjxF2o5gMbK2U,4808
  mani_skill/envs/tasks/humanoid/transport_box.py,sha256=w9E41h1h7gLKx9PE0a6vtXkSo_9GgYnr03hChdpFD2w,12260
@@ -637,11 +637,8 @@ mani_skill/envs/utils/randomization/pose.py,sha256=9PPg-QMorHVe3fV4e3T-BRYu0E_8I
  mani_skill/envs/utils/randomization/samplers.py,sha256=EOkF18mmDC7fA2hgj8QC2Ag0gnf4H-4MIjOCDvTMpCE,3665
  mani_skill/envs/utils/rewards/__init__.py,sha256=66MV5YCbnpF4ac_SvTVJ000RxM1AIsclX7OatiA-Wak,22
  mani_skill/envs/utils/rewards/common.py,sha256=1lfJNWG3x7UjarHLteLXa8DCzbe_L7nYBMOBo5D9CRQ,3647
+ mani_skill/envs/utils/system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/envs/utils/system/backend.py,sha256=9xk-ef8779Cj27THXkVM-sxEMQWH8bUrWiwgaf-MKCo,3309
- mani_skill/evaluation/__init__.py,sha256=PCMN6dc9zgFKuXTPmkAUEahP3XG65cKRLGMXrk5IeXY,33
- mani_skill/evaluation/evaluator.py,sha256=1EN6qAiGx3taB4vCeArUp33UZjLvBt1Ke6iUW8Z8aV0,4493
- mani_skill/evaluation/run_evaluation.py,sha256=yorphrlJKEGycHfQS8equnJHRsyjDuv77ZGNpg9wvCs,4780
- mani_skill/evaluation/solution.py,sha256=e_Aa0f4sSQ56KXL7tVDPUKf7WTjcuFc5X4J76p884Zs,1269
  mani_skill/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/examples/demo_manual_control.py,sha256=Z17ER37oehS8VgtDO_4dwiy5jDgL93nT9IdCsNDf0Es,8275
  mani_skill/examples/demo_manual_control_continuous.py,sha256=tnCnKX2v1iIhtXwvWR2NzXgpf3e0y2-qAO91jJBLIO0,9679
@@ -706,20 +703,20 @@ mani_skill/trajectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  mani_skill/trajectory/dataset.py,sha256=nrG3jkhdzRqAdjxC_c8Z4FxpkvW3A9XPvUp9-Ux_u38,6351
  mani_skill/trajectory/merge_trajectory.py,sha256=zsjRMTsiIirZGIV4KrtYOM2-zoOAzd7ObZEdWGJzZbE,3685
  mani_skill/trajectory/replay_trajectory.py,sha256=ABiM4pMSkTAhU1L2fdaY-Mwnw2Hzg8p1rAaWf3ijWOE,27681
- mani_skill/trajectory/utils/__init__.py,sha256=Nchv09IpXv0FOgpf7Ng1Ekus6ZfAh3kI0KJs-79QOig,1515
+ mani_skill/trajectory/utils/__init__.py,sha256=-Efv2GEzTnFHd3SxqQtaZLaMRGrCc-P1ClmgLhoV4gs,1465
  mani_skill/trajectory/utils/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/trajectory/utils/actions/conversion.py,sha256=x88C64ke44gB-HEbqq_gSRFv34L7irSwT_wYttkQUn8,12922
  mani_skill/utils/README.md,sha256=A2UG1u5-4LLi-Fkc4lCcEBjSHwxY0xst2Xtvyw4q-0c,1519
  mani_skill/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/utils/common.py,sha256=PYzgQRpTFnqKj2brD0mfGuOICLPsruEDXqs9D7u8Trg,13118
  mani_skill/utils/download_asset.py,sha256=vP7dsho4Fc-b-Ydm9vj6opFmORa-2Svd69w1izuk5go,8060
- mani_skill/utils/download_demo.py,sha256=ndnnc1TfyuLVb-odrA50HvuZemXZf-zZlqyFTinx-mA,4467
+ mani_skill/utils/download_demo.py,sha256=Hu-JSkxQpsoLhxZeM-m2NIeDiBUA2YMoA6cJH0HrVaE,4454
  mani_skill/utils/gym_utils.py,sha256=rAYNyxJtF1bSghi_EDPlqgnYZQwG0RklobgaPztSEvw,5922
  mani_skill/utils/io_utils.py,sha256=VJ93qoHxzFWIljAp9043pJqETmEGmxzt_DoYtE5Hd_I,1615
  mani_skill/utils/logging_utils.py,sha256=Iomnw2VrJ56A6kvyIm9Zhgp94Qf-WOf9WuMAGNB-2fc,1765
- mani_skill/utils/precompile_mpm.py,sha256=D497m_0mcNifeBEV7wmG5gHU_SstNpOoHIb-OQiqvUA,291
  mani_skill/utils/registration.py,sha256=u8ftfGvQP4qzlKNqLQjGi3cRF_-h6Rz-28xbLkW_YJ4,9718
- mani_skill/utils/sapien_utils.py,sha256=QMV0jRZO51KzIMB5CVW_Ne-4fPw0-mqM4a3yhNZaMYo,16430
+ mani_skill/utils/sapien_utils.py,sha256=bUo4jFvWbxjJxXY-cjcR7Q2ZAhAcArlPaElYI2f3RUI,18025
+ mani_skill/utils/tree.py,sha256=jFyqCqVkarR6AaTDissVo33nOfbPfkZp8CsqEQmCV9Q,515
  mani_skill/utils/assets/README.md,sha256=5kkmsIiV64ySEGO34HaAlpjXTyrGs1KTV5WnofK46G0,70
  mani_skill/utils/assets/__init__.py,sha256=gQVKwAczcImTXArSltBWKlSUUuguO12sZYO3Jh5KLso,159
  mani_skill/utils/assets/data.py,sha256=xEuibRoEPBDN_vEU-MM5UWf6VDb1omE6BfZKPvlMPdI,8807
@@ -730,8 +727,8 @@ mani_skill/utils/building/articulation_builder.py,sha256=ubRJYjINo7XTQ9IfE45Ie3C
  mani_skill/utils/building/ground.py,sha256=YaVt9xQfsqG0GRjKWe0o1e7pdPzmN-PN8FQOdzdargU,4365
  mani_skill/utils/building/mjcf_loader.py,sha256=FY--8z4JWjDHCM6XYLSC8W14MHVywV0SzOTiHzWafbs,4452
  mani_skill/utils/building/urdf_loader.py,sha256=llYoiRDU5gTi7sgi8Fv-Zjwdo7SoLpmtG4gJJEpn85g,4905
- mani_skill/utils/building/actors/__init__.py,sha256=G7aFAkb2f7c6hUcljJ17GIvlzUzVlpIOg6AZkTWFbDg,1420
- mani_skill/utils/building/actors/common.py,sha256=E2T6fUxAh5It3welkTkMQ_ptkDq8dh06-klJCtagK80,9290
+ mani_skill/utils/building/actors/__init__.py,sha256=2jOgWQV_spgIfZIKHPEf4FFOvBISYVUadumqWHCZDP8,218
+ mani_skill/utils/building/actors/common.py,sha256=MxTb8nDqNFVcAJQn3SoLf8piltE5cuavArGG5VlxxNA,10586
  mani_skill/utils/building/actors/ycb.py,sha256=6iHSpHicdfXlk7RmVEthUEzGStfj6cSGOHoHSc37Pjc,1347
  mani_skill/utils/building/articulations/__init__.py,sha256=jC8rbOFzLnMkC0O-Je2RDMQlDdu3Muw8P9gbnTgLHU4,1147
  mani_skill/utils/building/articulations/partnet_mobility.py,sha256=gVlAVFOpaqE3HKUNJ2DYQTYySldK39ysPCfxPKIXyb0,2455
@@ -756,6 +753,8 @@ mani_skill/utils/scene_builder/ai2thor/metadata/ArchitecTHOR.json,sha256=mbPJWHd
  mani_skill/utils/scene_builder/ai2thor/metadata/ProcTHOR.json,sha256=OYnCKrlYdbChbF2cSfNUeI0GH2z-X6ZkQtoskRAaqtQ,605697
  mani_skill/utils/scene_builder/ai2thor/metadata/RoboTHOR.json,sha256=v92NpbBGBMisvu1dA382vML-OBp2spTj94Y71QffgbQ,3612
  mani_skill/utils/scene_builder/ai2thor/metadata/iTHOR.json,sha256=wAvoEM_JC2EpaegMs5hv9hbRQ-G4CC78_loeEh4jQZc,7488
+ mani_skill/utils/scene_builder/control/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ mani_skill/utils/scene_builder/control/planar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/utils/scene_builder/control/planar/scene_builder.py,sha256=9zG6SLx7aySYcru73lpK-Y8X5IvFiWYiV4M9RSFIPSY,1075
  mani_skill/utils/scene_builder/kitchen_counter/__init__.py,sha256=TFMKaUyPqRlIJyWfvs_POQHdiqWwdM-O-ssZjkxhHBA,54
  mani_skill/utils/scene_builder/kitchen_counter/scene_builder.py,sha256=bbI5c4ax9YtFLUTMD86HAnqdm9oSUN49b-5FROx-NP0,1464
@@ -767,6 +766,7 @@ mani_skill/utils/scene_builder/replicacad/metadata/scene_configs.json,sha256=DIm
  mani_skill/utils/scene_builder/replicacad/rearrange/__init__.py,sha256=6wrAvNhnpkXOSYACdzTe4FveGDfycCINEHbTUbHRszE,284
  mani_skill/utils/scene_builder/replicacad/rearrange/scene_builder.py,sha256=G1bqkiIoKrUXWbxxZqVDwLzJM-NcXnvct6PcWlRiBv8,17469
  mani_skill/utils/scene_builder/replicacad/rearrange/variants.py,sha256=x5bn_aE-uReI_ZRHUzQBMlDzhKWSS1w2tSiHosF8BAw,1170
+ mani_skill/utils/scene_builder/robocasa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/utils/scene_builder/robocasa/scene_builder.py,sha256=1Dc4fZ5l4WKNAc-qYsfDnOGC1qW3C2HjJ6mTCHYqO9o,40749
  mani_skill/utils/scene_builder/robocasa/fixtures/accessories.py,sha256=SjG_r7coAKRJ7WttUP-bfSwuz4sZUX-4z8D28xhdYQE,7857
  mani_skill/utils/scene_builder/robocasa/fixtures/cabinet.py,sha256=TwG625RjTaM_DqTxxjyvYxPnPnoJsUcTpV8zEtJvw8A,46314
@@ -785,7 +785,6 @@ mani_skill/utils/scene_builder/robocasa/fixtures/others.py,sha256=PP1_r66uckZK1v
  mani_skill/utils/scene_builder/robocasa/fixtures/sink.py,sha256=-ci0XTadmyZ2iIWq4ckk7J__YxerScSvvRq-bpSY5tM,4274
  mani_skill/utils/scene_builder/robocasa/fixtures/stove.py,sha256=aQ7PpZ3RVYTpj0OxeA6hTFEm8uY-Vp9APQHHExZmS4c,6871
  mani_skill/utils/scene_builder/robocasa/fixtures/windows.py,sha256=WiVaFjZ6JTjb6Zy2BMpffsRXC2WEPkMzhCNSXY5FjW8,12953
- mani_skill/utils/scene_builder/robocasa/objects/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/utils/scene_builder/robocasa/objects/kitchen_object_utils.py,sha256=Y-9SjooCfr76Rt_T6U1-57pCDQg-aaPI0jNJ5rLHIo8,14459
  mani_skill/utils/scene_builder/robocasa/objects/kitchen_objects.py,sha256=qjiON5GEXN3SCtgN2vZKmOe5bv3H-CMnIrPD926Rh4A,48332
  mani_skill/utils/scene_builder/robocasa/objects/objects.py,sha256=zVhcvuOSmm7bYMOxUgPdCErwXFaRjXHJCC1ohA2L4tc,3581
@@ -801,7 +800,7 @@ mani_skill/utils/scene_builder/table/assets/table.glb,sha256=yw69itZDjBFg8JXZAr9
  mani_skill/utils/structs/README.md,sha256=qnYKimp_ZkgNcduURrYQxVTimNmq_usDMKoQ8VtMdCs,286
  mani_skill/utils/structs/__init__.py,sha256=BItR3Xe0z6xCrMHAEaH0AAAVyeonsQ3q-DJUyRUibAA,524
  mani_skill/utils/structs/actor.py,sha256=L0p6vkr8rGtJmF22xAq8Q7nhXKnDD5dahzODSAko0bg,17394
- mani_skill/utils/structs/articulation.py,sha256=DfNu3irWZ7LWoMCQAR4t0F8QRnojYhmPBnk2LNYoTtw,38341
+ mani_skill/utils/structs/articulation.py,sha256=QvWQsHdgCUCiGHVfUzZOeEqJ_OQsbi7CrxkCgjy3wyM,38491
  mani_skill/utils/structs/articulation_joint.py,sha256=xDQkCAXM3XZ56YgFqLwH5Ec8aFqhR5BqMSvDYCS0bzw,12972
  mani_skill/utils/structs/base.py,sha256=meGQK5Y4KtHKLnp9VeOZS2gtwg9tE55whuEeqOguBaI,19465
  mani_skill/utils/structs/decorators.py,sha256=Lv6wQ989dOnreo2tB-qopDnkeBp_jsn1pmfUR-OY8VQ,535
@@ -815,20 +814,20 @@ mani_skill/utils/visualization/__init__.py,sha256=0QF97UR8d7poMHo6m52DsAUXAmUb3S
  mani_skill/utils/visualization/jupyter_utils.py,sha256=dXXUQz-rFTOV_Xq5yA6YE6cXg7DPw15YStw37NgB5Qc,1322
  mani_skill/utils/visualization/misc.py,sha256=KrDCef7F5GmGOdiBQ4qFUnmUTe-7-nNBz2DVBGFD8YU,5041
  mani_skill/utils/visualization/renderer.py,sha256=afFWwSQEeL-9c5CsBT1uug-zugGjOr1FDzmvd45-9dk,1646
- mani_skill/utils/wrappers/__init__.py,sha256=XCQdTP2RQ95UXrDjy1QxgcKEuevceGU_5KUGl636J6I,276
+ mani_skill/utils/wrappers/__init__.py,sha256=QfSTw9RNtQEtEAzD1RLx0WXjNjIS7wVM2IIkSkLT6Xw,321
  mani_skill/utils/wrappers/action_repeat.py,sha256=RhCtzt3fYCtD-CClIOhAzdycGwVTXP_FG61yEf-QLqY,3542
+ mani_skill/utils/wrappers/cached_reset.py,sha256=KV9Sd-mIK9NM_nes-7HtO3HAkkuflctVnUejSN77ecE,6272
  mani_skill/utils/wrappers/flatten.py,sha256=GuHJ3fCOdj9G_jm--XgG8k0p2G1eJx4LY1tesQQjnkg,4913
  mani_skill/utils/wrappers/frame_stack.py,sha256=pCp83HqXnFxbsKRYgwXreNBHnhD-yF0R2_7jdtGOTWQ,4213
  mani_skill/utils/wrappers/gymnasium.py,sha256=p0kl29kkedD2arIvGskClKhYDBAH97mZO4rTepz62jQ,4174
- mani_skill/utils/wrappers/record.py,sha256=757N7D6m6bH02BjIlLvFxGwbQDqDouiRv7F_6Prp57k,37322
+ mani_skill/utils/wrappers/record.py,sha256=73g-dvnFrXcFy7t2BksgH8UqBUVBZG8BbCNPEbdtBSc,37362
  mani_skill/utils/wrappers/visual_encoders.py,sha256=ISLO5ceaRkINhvce92VuZMDMCU3I4F7cQWFW2aVP-14,2205
  mani_skill/vector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mani_skill/vector/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mani_skill/vector/wrappers/gymnasium.py,sha256=aNPB-2oGDLep8qzdsuTSIlwGGO0OGQAQ193LefOGoTk,7434
+ mani_skill/vector/wrappers/gymnasium.py,sha256=voHNmYg5Jyy-laMSC2Fd8VggQvhXw3NnfYLbD9QDXAc,7305
  mani_skill/vector/wrappers/sb3.py,sha256=SlXdiEPqcNHYMhJCzA29kBU6zK7DKTe1nc0L6Z3QQtY,4722
- mani_skill/viewer/__init__.py,sha256=srvDBsk4LQU75K2VIttrhiQ68p_ro7PSDqQRls2PY5c,1722
- mani_skill_nightly-2025.7.25.329.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- mani_skill_nightly-2025.7.25.329.dist-info/METADATA,sha256=ZORmjNSU7HXw-F69GCdOiTQnUMeQKOaCBbpJIfaMCP8,9271
- mani_skill_nightly-2025.7.25.329.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- mani_skill_nightly-2025.7.25.329.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
- mani_skill_nightly-2025.7.25.329.dist-info/RECORD,,
+ mani_skill_nightly-2025.7.27.104.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ mani_skill_nightly-2025.7.27.104.dist-info/METADATA,sha256=4hFvpXpBe32gKSjBcFIvT4UjJCORr8wxCvMeMuApQeg,9318
+ mani_skill_nightly-2025.7.27.104.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ mani_skill_nightly-2025.7.27.104.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
+ mani_skill_nightly-2025.7.27.104.dist-info/RECORD,,
mani_skill/evaluation/__init__.py
@@ -1 +0,0 @@
- from .solution import BasePolicy
mani_skill/evaluation/evaluator.py
@@ -1,129 +0,0 @@
- from typing import Callable, List, Type
-
- import gymnasium as gym
- import numpy as np
-
- from mani_skill.envs.sapien_env import BaseEnv
- from mani_skill.utils import common, gym_utils
-
- from .solution import BasePolicy
-
-
- class BaseEvaluator:
-     env: gym.Env
-     policy: BasePolicy
-
-     MAX_EPISODE_STEPS = 1000
-
-     def setup(
-         self,
-         env_id: str,
-         policy_cls: Type[BasePolicy],
-         render_mode="cameras",
-         env_kwargs=None,
-     ):
-         """Setup environment and policy."""
-         self.env_id = env_id
-         self.env_kwargs = {} if env_kwargs is None else env_kwargs
-
-         obs_mode = policy_cls.get_obs_mode(env_id)
-         control_mode = policy_cls.get_control_mode(env_id)
-
-         self.env: BaseEnv = gym.make(
-             self.env_id,
-             obs_mode=obs_mode,
-             control_mode=control_mode,
-             render_mode=render_mode,
-             **self.env_kwargs
-         )
-         self.policy = policy_cls(
-             self.env_id, self.env.observation_space, self.env.action_space
-         )
-         self.result = dict()
-
-     def evaluate_episode(self, reset_kwargs, render=False):
-         """Evaluate a single episode."""
-         env = self.env
-         policy = self.policy
-
-         obs, _ = env.reset(**reset_kwargs)
-         policy.reset(obs)
-         # NOTE(jigu): Use for-loop rather than while-loop
-         # in case time limit is not correctly set.
-         for _ in range(self.MAX_EPISODE_STEPS):
-             action = policy.act(obs)
-             # NOTE(jigu): render after action in case action is needed to visualize
-             if render:
-                 env.render()
-             obs, reward, terminated, truncated, info = env.step(action)
-             if terminated or truncated:
-                 if render:
-                     env.render()
-                 assert "success" in info, sorted(info.keys())
-                 metrics = gym_utils.extract_scalars_from_info(
-                     info, "TimeLimit.truncated"
-                 )
-                 return metrics
-
-     def evaluate_episodes(self, episode_cfgs: List[dict], callback: Callable = None):
-         """Evaluate episodes according to configurations.
-
-         Args:
-             episode_cfgs (List[dict]): a list of episode configurations.
-                 The configuration should contain "reset_kwargs".
-             callback (Callable, optional): callback function to report progress.
-                 It accepts two arguments:
-                     int: the number of completed episodes
-                     dict: the results of the latest evaluated episode
-         """
-         for i, episode_cfg in enumerate(episode_cfgs):
-             episode_id = episode_cfg.get("episode_id", i)
-             reset_kwargs = episode_cfg.get("reset_kwargs", {})
-             metrics = self.evaluate_episode(reset_kwargs)
-             if metrics is None:
-                 raise RuntimeError(
-                     "Episode {}: check whether time limit is set".format(episode_id)
-                 )
-             if episode_id in self.result:
-                 raise RuntimeError("Episode id {} is not unique.".format(episode_id))
-             self.result[episode_id] = metrics
-
-             if callback is not None:
-                 callback(i + 1, metrics)
-
-     def close(self):
-         self.env.close()
-
-     def generate_dummy_config(self, env_id, num_episodes: int):
-         """Generate dummy configuration."""
-         env_info = dict(env_id=env_id)
-         episodes = [dict(episode_id=i) for i in range(num_episodes)]
-         return dict(env_info=env_info, episodes=episodes)
-
-     def merge_result(self):
-         merged_result = common.merge_dicts(self.result.values())
-         merged_metrics = {k: np.mean(v) for k, v in merged_result.items()}
-         return merged_metrics
-
-     def export_to_csv(self, path):
-         """Average results and export to a csv file."""
-         import csv
-
-         import tabulate
-
-         merged_metrics = self.merge_result()
-         headers = ["env_id"] + list(merged_metrics.keys())
-         data = [[self.env_id] + list(merged_metrics.values())]
-         print(tabulate(data, headers=headers, tablefmt="psql", floatfmt=".4f"))
-
-         with open(path, "w") as f:
-             csv_writer = csv.writer(f)
-             csv_writer.writerow(headers)
-             csv_writer.writerows(data)
-         print("The evaluation result is saved to {}.".format(path))
-
-     def submit(self):
-         raise NotImplementedError
-
-     def error(self, *args, **kwargs):
-         raise NotImplementedError
mani_skill/evaluation/run_evaluation.py
@@ -1,147 +0,0 @@
- import os
- import sys
-
- from tqdm import tqdm
-
- from mani_skill.evaluation.evaluator import BaseEvaluator
- from mani_skill.utils.io_utils import dump_json, load_json, write_txt
- from mani_skill.utils.wrappers import RecordEpisode
-
-
- class Evaluator(BaseEvaluator):
-     """Local evaluation."""
-
-     def __init__(self, output_dir: str, record_dir=None):
-         if os.path.exists(output_dir):
-             print(f"{output_dir} already exists.")
-         os.makedirs(output_dir, exist_ok=True)
-         self.output_dir = output_dir
-
-         self.record_dir = record_dir
-
-     def setup(self, *args, **kwargs):
-         super().setup(*args, **kwargs)
-         if self.record_dir is not None:
-             self.env = RecordEpisode(self.env, self.record_dir, clean_on_close=False)
-
-     def submit(self):
-         # Export per-episode results
-         json_path = os.path.join(self.output_dir, "episode_results.json")
-         dump_json(json_path, self.result)
-         print("The per-episode evaluation result is saved to {}.".format(json_path))
-
-         # Export average result
-         json_path = os.path.join(self.output_dir, "average_metrics.json")
-         merged_metrics = self.merge_result()
-         self.merged_metrics = merged_metrics
-         dump_json(json_path, merged_metrics)
-         print("The averaged evaluation result is saved to {}.".format(json_path))
-
-     def error(self, *args):
-         write_txt(os.path.join(self.output_dir, "error.log"), args)
-
-
- class TqdmCallback:
-     def __init__(self, n: int):
-         self.n = n
-         self.pbar = tqdm(total=n)
-
-     def __call__(self, i, metrics):
-         self.pbar.update()
-
-
- def parse_args():
-     import argparse
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "-e", "--env-id", type=str, required=True, help="Environment ID"
-     )
-     parser.add_argument(
-         "-o",
-         "--output-dir",
-         type=str,
-         required=True,
-         help="Directory to save evaluation results.",
-     )
-     parser.add_argument(
-         "--config-file",
-         type=str,
-         help="Path to the config file. If None, use the dummy config.",
-     )
-     # For debug only
-     parser.add_argument("-n", "--num-episodes", type=int, help="Number of episodes.")
-     parser.add_argument(
-         "--use-random-policy",
-         action="store_true",
-         help="Whether to use a random policy.",
-     )
-     parser.add_argument(
-         "--record-dir",
-         type=str,
-         help="Directory to record videos and trajectories. If it is '@', use the output directory.",
-     )
-
-     args = parser.parse_args()
-     return args
-
-
- def main():
-     args = parse_args()
-
-     if args.record_dir == "@":
-         args.record_dir = args.output_dir
-     evaluator = Evaluator(args.output_dir, record_dir=args.record_dir)
-
-     # ---------------------------------------------------------------------------- #
-     # Load evaluation configuration
-     # ---------------------------------------------------------------------------- #
-     try:
-         if args.config_file is not None:
-             config = load_json(args.config_file)
-             config_env_id = config["env_info"]["env_id"]
-             assert config_env_id == args.env_id, (config_env_id, args.env_id)
-         else:  # For debug
-             config = evaluator.generate_dummy_config(args.env_id, args.num_episodes)
-     except:
-         exc_info = sys.exc_info()
-         print("Fail to load evaluation configuration.", exc_info[:-1])
-         evaluator.error("Fail to load evaluation configuration.", str(exc_info[0]))
-         exit(1)
-
-     # ---------------------------------------------------------------------------- #
-     # Import user policy
-     # ---------------------------------------------------------------------------- #
-     if args.use_random_policy:
-         from mani_skill.evaluation.solution import RandomPolicy
-
-         UserPolicy = RandomPolicy
-     else:
-         try:
-             from user_solution import UserPolicy
-         except:
-             exc_info = sys.exc_info()
-             print("Fail to import UserPolicy", exc_info[:-1])
-             evaluator.error("Fail to import UserPolicy", str(exc_info[0]))
-             exit(2)
-
-     # ---------------------------------------------------------------------------- #
-     # Main
-     # ---------------------------------------------------------------------------- #
-     env_kwargs = config["env_info"].get("env_kwargs")
-     evaluator.setup(
-         args.env_id, UserPolicy, render_mode="cameras", env_kwargs=env_kwargs
-     )
-
-     episodes = config["episodes"]
-     if args.num_episodes is not None:
-         episodes = episodes[: args.num_episodes]
-     cb = TqdmCallback(len(episodes))
-     evaluator.evaluate_episodes(episodes, callback=cb)
-
-     evaluator.submit()
-     evaluator.close()
-
-
- if __name__ == "__main__":
-     main()
mani_skill/evaluation/solution.py
@@ -1,42 +0,0 @@
- import numpy as np
- from gymnasium import spaces
-
-
- class BasePolicy:
-     def __init__(
-         self, env_id: str, observation_space: spaces.Space, action_space: spaces.Space
-     ) -> None:
-         self.env_id = env_id
-         self.observation_space = observation_space
-         self.action_space = action_space
-         # NOTE(jigu): Do not assume that gym.make(env_id) works during evaluation
-
-     def reset(self, observations):
-         """Called at the beginning of an episode."""
-
-     def act(self, observations) -> np.ndarray:
-         """Act based on the observations."""
-         raise NotImplementedError
-
-     @classmethod
-     def get_obs_mode(cls, env_id: str) -> str:
-         """Get the observation mode for the policy. Define the observation space."""
-         raise NotImplementedError
-
-     @classmethod
-     def get_control_mode(cls, env_id) -> str:
-         """Get the control mode for the policy. Define the action space."""
-         raise NotImplementedError
-
-
- class RandomPolicy(BasePolicy):
-     def act(self, observations):
-         return self.action_space.sample()
-
-     @classmethod
-     def get_obs_mode(cls, env_id: str) -> str:
-         return "rgbd"
-
-     @classmethod
-     def get_control_mode(cls, env_id: str) -> str:
-         return None  # use default one
mani_skill/utils/precompile_mpm.py
@@ -1,13 +0,0 @@
- import gymnasium as gym
-
- from mani_skill.envs import mpm
-
- ENV_IDS = ["Excavate-v0", "Fill-v0", "Pour-v0", "Hang-v0", "Write-v0", "Pinch-v0"]
-
-
- if __name__ == "__main__":
-     for env_id in ENV_IDS:
-         env = gym.make(env_id)
-         env.reset()
-         env.step(None)
-         env.close()
mani_skill/viewer/__init__.py
@@ -1,46 +0,0 @@
- import sapien
- from sapien.utils import Viewer
- import sys
-
- from mani_skill.render import SAPIEN_RENDER_SYSTEM
- from mani_skill.sensors.camera import CameraConfig
-
-
- def create_viewer(viewer_camera_config: CameraConfig):
-     """Creates a viewer with the given camera config"""
-     if SAPIEN_RENDER_SYSTEM == "3.0":
-         sapien.render.set_viewer_shader_dir(
-             viewer_camera_config.shader_config.shader_pack
-         )
-         if viewer_camera_config.shader_config.shader_pack[:2] == "rt":
-             sapien.render.set_ray_tracing_denoiser(
-                 viewer_camera_config.shader_config.shader_pack_config[
-                     "ray_tracing_denoiser"
-                 ]
-             )
-             sapien.render.set_ray_tracing_path_depth(
-                 viewer_camera_config.shader_config.shader_pack_config[
-                     "ray_tracing_path_depth"
-                 ]
-             )
-             sapien.render.set_ray_tracing_samples_per_pixel(
-                 viewer_camera_config.shader_config.shader_pack_config[
-                     "ray_tracing_samples_per_pixel"
-                 ]
-             )
-         viewer = Viewer(
-             resolutions=(viewer_camera_config.width, viewer_camera_config.height)
-         )
-         if sys.platform == 'darwin':  # macOS
-             viewer.window.set_content_scale(1)
-     elif SAPIEN_RENDER_SYSTEM == "3.1":
-         # TODO (stao): figure out how shader pack configs can be set at run time
-         viewer = Viewer(
-             resolutions=(viewer_camera_config.width, viewer_camera_config.height),
-             shader_pack=sapien.render.get_shader_pack(
-                 viewer_camera_config.shader_config.shader_pack
-             ),
-         )
-
-
-     return viewer