mani-skill-nightly 2025.7.25.606-py3-none-any.whl → 2025.8.1.229-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of mani-skill-nightly might be problematic.
Files changed (28)
  1. mani_skill/agents/__init__.py +3 -1
  2. mani_skill/envs/sapien_env.py +15 -9
  3. mani_skill/envs/tasks/drawing/__init__.py +3 -3
  4. mani_skill/envs/tasks/humanoid/__init__.py +1 -1
  5. mani_skill/trajectory/utils/__init__.py +0 -2
  6. mani_skill/utils/building/actor_builder.py +1 -0
  7. mani_skill/utils/building/actors/__init__.py +11 -35
  8. mani_skill/utils/building/actors/common.py +30 -0
  9. mani_skill/utils/download_demo.py +0 -2
  10. mani_skill/utils/sapien_utils.py +44 -4
  11. mani_skill/utils/scene_builder/control/__init__.py +0 -0
  12. mani_skill/utils/scene_builder/control/planar/__init__.py +0 -0
  13. mani_skill/utils/scene_builder/robocasa/__init__.py +0 -0
  14. mani_skill/utils/structs/base.py +2 -12
  15. mani_skill/utils/structs/types.py +2 -0
  16. mani_skill/vector/wrappers/gymnasium.py +1 -2
  17. {mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/METADATA +3 -2
  18. {mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/RECORD +22 -25
  19. mani_skill/evaluation/__init__.py +0 -1
  20. mani_skill/evaluation/evaluator.py +0 -129
  21. mani_skill/evaluation/run_evaluation.py +0 -147
  22. mani_skill/evaluation/solution.py +0 -42
  23. mani_skill/utils/precompile_mpm.py +0 -13
  24. mani_skill/viewer/__init__.py +0 -46
  25. /mani_skill/{utils/scene_builder/robocasa/objects → envs/utils/system}/__init__.py +0 -0
  26. {mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/LICENSE +0 -0
  27. {mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/WHEEL +0 -0
  28. {mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/top_level.txt +0 -0
mani_skill/agents/__init__.py
@@ -1 +1,3 @@
-from .registration import REGISTERED_AGENTS
+from .base_agent import BaseAgent
+from .multi_agent import MultiAgent
+from .registration import REGISTERED_AGENTS, register_agent
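
The new top-level re-exports make the common agent-building entry points importable from mani_skill.agents directly. A minimal sketch of how a custom agent would use them (the uid and urdf path below are hypothetical placeholders, and the decorator usage follows the usual ManiSkill custom-robot pattern rather than anything shown in this diff):

    from mani_skill.agents import BaseAgent, register_agent

    @register_agent()  # registers the class in REGISTERED_AGENTS under its uid
    class MyRobot(BaseAgent):
        uid = "my_robot"                     # hypothetical uid
        urdf_path = "path/to/my_robot.urdf"  # hypothetical asset path
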
mani_skill/envs/sapien_env.py
@@ -16,9 +16,7 @@ from gymnasium.vector.utils import batch_space
 
 import mani_skill.render.utils as render_utils
 from mani_skill import logger
-from mani_skill.agents import REGISTERED_AGENTS
-from mani_skill.agents.base_agent import BaseAgent
-from mani_skill.agents.multi_agent import MultiAgent
+from mani_skill.agents import REGISTERED_AGENTS, BaseAgent, MultiAgent
 from mani_skill.envs.scene import ManiSkillScene
 from mani_skill.envs.utils.observations import (
     parse_obs_mode_to_struct,
@@ -42,7 +40,6 @@ from mani_skill.utils.structs import Actor, Articulation
 from mani_skill.utils.structs.pose import Pose
 from mani_skill.utils.structs.types import Array, SimConfig
 from mani_skill.utils.visualization.misc import tile_images
-from mani_skill.viewer import create_viewer
 
 
 class BaseEnv(gym.Env):
@@ -267,7 +264,16 @@ class BaseEnv(gym.Env):
         common.dict_merge(merged_gpu_sim_config, sim_config)
         self.sim_config = dacite.from_dict(data_class=SimConfig, data=merged_gpu_sim_config, config=dacite.Config(strict=True))
         """the final sim config after merging user overrides with the environment default"""
-        physx.set_gpu_memory_config(**self.sim_config.gpu_memory_config.dict())
+        gpu_mem_config = self.sim_config.gpu_memory_config.dict()
+
+        # NOTE (stao): there isn't a easy way to check of collision_stack_size is supported for the installed sapien3 version
+        # to get around that we just try and except. To be removed once mac/windows platforms can upgrade to latest sapien versions
+        try:
+            physx.set_gpu_memory_config(**gpu_mem_config)
+        except TypeError:
+            gpu_mem_config.pop("collision_stack_size")
+            physx.set_gpu_memory_config(**gpu_mem_config)
+
         sapien.render.set_log_level(os.getenv("MS_RENDERER_LOG_LEVEL", "warn"))
 
         # Set simulation and control frequency
@@ -741,7 +747,7 @@
         # for GPU sim, we have to setup sensors after we call setup gpu in order to enable loading mounted sensors as they depend on GPU buffer data
         if self.scene.can_render(): self._setup_sensors(options)
         if self.render_mode == "human" and self._viewer is None:
-            self._viewer = create_viewer(self._viewer_camera_config)
+            self._viewer = sapien_utils.create_viewer(self._viewer_camera_config)
         if self._viewer is not None:
             self._setup_viewer()
         self._reconfig_counter = self.reconfiguration_freq
@@ -852,7 +858,7 @@
        options["reconfigure"] is True, will call self._reconfigure() which deletes the entire physx scene and reconstructs everything.
        Users building custom tasks generally do not need to override this function.

-       If options["reset_to_env_states"] is given, we expect there to be options["reset_to_env_states"]["env_states"] and optionally options["reset_to_env_states"]["obs"], both with
+       If options["reset_to_env_states"] is given, we expect there to be options["reset_to_env_states"]["env_states"] and optionally options["reset_to_env_states"]["obs"], both with
        batch size equal to the number of environments being reset. "env_states" can be a dictionary or flat tensor and we skip calling the environment's _initialize_episode function which
        generates the initial state on a normal reset. If "obs" is given we skip calling the environment's get_obs function which can save some compute/time.

@@ -928,7 +934,7 @@
         if "reset_to_env_states" in options:
             env_states = options["reset_to_env_states"]["env_states"]
             reset_to_env_states_obs = options["reset_to_env_states"].get("obs", None)
-            if isinstance(env_states, dict):
+            if isinstance(env_states, dict):
                 self.set_state_dict(env_states, env_idx)
             else:
                 self.set_state(env_states, env_idx)
@@ -1344,7 +1350,7 @@
         for obj in self._hidden_objects:
             obj.show_visual()
         if self._viewer is None:
-            self._viewer = create_viewer(self._viewer_camera_config)
+            self._viewer = sapien_utils.create_viewer(self._viewer_camera_config)
             self._setup_viewer()
         if self.gpu_sim_enabled and self.scene._gpu_sim_initialized:
             self.scene.px.sync_poses_gpu_to_cpu()
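
The expanded reset docstring above describes the reset_to_env_states option. A hedged sketch of how a caller might use it (env, saved_states, and saved_obs are assumed to come from an earlier rollout and are not part of this diff):

    # `env` is a ManiSkill BaseEnv; `saved_states` came from an earlier env.get_state_dict()
    # (or a flat env.get_state() tensor) with batch size equal to the environments being reset.
    obs, info = env.reset(options=dict(
        reset_to_env_states=dict(
            env_states=saved_states,
            # obs=saved_obs,  # optional: skips recomputing observations on reset
        )
    ))
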
mani_skill/envs/tasks/drawing/__init__.py
@@ -1,3 +1,3 @@
-from .draw import *
-from .draw_triangle import *
-from .draw_svg import *
+from .draw import TableTopFreeDrawEnv
+from .draw_svg import DrawSVGEnv
+from .draw_triangle import DrawTriangleEnv
mani_skill/envs/tasks/humanoid/__init__.py
@@ -1,3 +1,3 @@
-from .humanoid_pick_place import *
+from .humanoid_pick_place import HumanoidPlaceAppleInBowl
 from .humanoid_stand import HumanoidStandEnv
 from .transport_box import TransportBoxEnv
mani_skill/trajectory/utils/__init__.py
@@ -6,8 +6,6 @@ Utils for working with ManiSkill trajectory files
 import h5py
 import numpy as np
 
-from mani_skill.utils.structs.types import Array
-
 
 def _get_dict_len(x):
     if isinstance(x, dict) or isinstance(x, h5py.Group):
mani_skill/utils/building/actor_builder.py
@@ -160,6 +160,7 @@ class ActorBuilder(SAPIENActorBuilder):
         component.cmass_local_pose = self._cmass_local_pose
         component.inertia = self._inertia
 
+        component.name = self.name
         return component
 
     def build_dynamic(self, name):
mani_skill/utils/building/actors/__init__.py
@@ -1,35 +1,11 @@
-from mani_skill.envs.scene import ManiSkillScene
-from mani_skill.utils.building.actor_builder import ActorBuilder
-
-from .common import *
-
-
-def get_actor_builder(
-    scene: ManiSkillScene, id: str, add_collision: bool = True, add_visual: bool = True
-) -> ActorBuilder:
-    """Builds an actor or returns an actor builder given an ID specifying which dataset/source and then the actor ID
-
-    Currently these IDs are hardcoded for a few datasets. The new Shapedex platform for hosting and managing all assets will be
-    integrated in the future
-
-    Args:
-        scene: The ManiSkillScene. If building a custom task this is generally just self.scene
-        id (str): The unique ID identifying the dataset and the ID of the actor in that dataset to build. The format should be
-            "<dataset_id>:<actor_id_in_dataset>"
-        add_collision (bool): Whether to include the collision shapes/meshes
-        add_visual (bool): Whether to include visual shapes/meshes
-    """
-    splits = id.split(":")
-    dataset_source = splits[0]
-    actor_id = ":".join(splits[1:])
-
-    if dataset_source == "ycb":
-        from mani_skill.utils.building.actors.ycb import get_ycb_builder
-
-        builder = get_ycb_builder(
-            scene=scene, id=actor_id, add_collision=add_collision, add_visual=add_visual
-        )
-    else:
-        raise RuntimeError(f"No dataset with id {dataset_source} was found")
-
-    return builder
+from .common import (
+    build_box,
+    build_colorful_cube,
+    build_cube,
+    build_cylinder,
+    build_fourcolor_peg,
+    build_red_white_target,
+    build_sphere,
+    build_twocolor_peg,
+    get_actor_builder,
+)
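
These re-exported helpers are the usual way tasks spawn primitive actors. A hedged sketch of build_cube usage (the keyword names follow the common ManiSkill task examples; the exact signature is not shown in this diff):

    from mani_skill.utils.building import actors

    def load_cube(scene):
        # `scene` is a ManiSkillScene (self.scene inside a custom task's _load_scene)
        return actors.build_cube(
            scene,
            half_size=0.02,
            color=[1, 0, 0, 1],
            name="cube",
            body_type="dynamic",
        )
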
mani_skill/utils/building/actors/common.py
@@ -14,6 +14,36 @@ from mani_skill.utils.structs.pose import Pose
 from mani_skill.utils.structs.types import Array
 
 
+def get_actor_builder(
+    scene: ManiSkillScene, id: str, add_collision: bool = True, add_visual: bool = True
+) -> ActorBuilder:
+    """Returns an :py:class:`~mani_skill.utils.building.actor_builder.ActorBuilder` given an ID specifying which dataset/source and then the ID of the asset.
+
+    Currently these IDs are hardcoded for a few datasets. We may add more actor datasets in the future for easy loading by users
+
+    Args:
+        scene: The ManiSkillScene. If building a custom task this is generally just self.scene
+        id (str): The unique ID identifying the dataset and the ID of the actor in that dataset to build. The format should be
+            "<dataset_id>:<actor_id_in_dataset>"
+        add_collision (bool): Whether to include the collision shapes/meshes
+        add_visual (bool): Whether to include visual shapes/meshes
+    """
+    splits = id.split(":")
+    dataset_source = splits[0]
+    actor_id = ":".join(splits[1:])
+
+    if dataset_source == "ycb":
+        from mani_skill.utils.building.actors.ycb import get_ycb_builder
+
+        builder = get_ycb_builder(
+            scene=scene, id=actor_id, add_collision=add_collision, add_visual=add_visual
+        )
+    else:
+        raise RuntimeError(f"No dataset with id {dataset_source} was found")
+
+    return builder
+
+
 def _build_by_type(
     builder: ActorBuilder,
     name,
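
With get_actor_builder now living in common.py (and still re-exported from the actors package), dataset-backed assets load the same way as before. A hedged sketch ("013_apple" is an illustrative YCB model id, and the YCB assets must already be downloaded, e.g. via python -m mani_skill.utils.download_asset ycb):

    import sapien
    from mani_skill.utils.building import actors

    def load_apple(scene):
        # `scene` is a ManiSkillScene (self.scene inside a custom task)
        builder = actors.get_actor_builder(scene, id="ycb:013_apple")
        builder.initial_pose = sapien.Pose(p=[0.0, 0.0, 0.05])
        return builder.build(name="apple")
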
mani_skill/utils/download_demo.py
@@ -46,8 +46,6 @@ for env_id in [
         raw_dataset_url=f"https://huggingface.co/datasets/haosulab/ManiSkill_Demonstrations/resolve/main/demos/{env_id}.zip?download=true"
     )
 
-pbar = None
-
 
 def tqdmhook(t):
     last_b = [0]
mani_skill/utils/sapien_utils.py
@@ -3,6 +3,7 @@ Utilities that work with the simulation / SAPIEN
 """
 from __future__ import annotations
 
+import sys
 from typing import TYPE_CHECKING, Dict, List, Tuple, TypeVar
 
 import numpy as np
@@ -10,15 +11,16 @@ import sapien
 import sapien.physx as physx
 import sapien.render
 import sapien.wrapper.urdf_loader
+import torch
+from sapien.utils import Viewer
 
+from mani_skill.render import SAPIEN_RENDER_SYSTEM
 from mani_skill.utils.geometry.rotation_conversions import matrix_to_quaternion
 from mani_skill.utils.structs.pose import Pose
 
 if TYPE_CHECKING:
     from mani_skill.utils.structs.actor import Actor
-    from mani_skill.envs.scene import ManiSkillScene
-
-    import torch
+    from mani_skill.sensors.camera import CameraConfig
 
 from mani_skill.utils.structs.types import Array, Device
 
@@ -115,7 +117,6 @@ def parse_urdf_config(config_dict: dict) -> Dict:
 
     Args:
         config_dict (dict): a dict containing link physical properties.
-        scene (ManiSkillScene): the simulation scene
 
     Returns:
         Dict: urdf config passed to `sapien.URDFLoader.load`.
@@ -436,3 +437,42 @@ def is_state_dict_consistent(state_dict: dict):
         if v.shape[0] != batch_size:
             return False
     return True
+
+
+def create_viewer(viewer_camera_config: CameraConfig):
+    """Creates a viewer with the given camera config"""
+    if SAPIEN_RENDER_SYSTEM == "3.0":
+        sapien.render.set_viewer_shader_dir(
+            viewer_camera_config.shader_config.shader_pack
+        )
+        if viewer_camera_config.shader_config.shader_pack[:2] == "rt":
+            sapien.render.set_ray_tracing_denoiser(
+                viewer_camera_config.shader_config.shader_pack_config[
+                    "ray_tracing_denoiser"
+                ]
+            )
+            sapien.render.set_ray_tracing_path_depth(
+                viewer_camera_config.shader_config.shader_pack_config[
+                    "ray_tracing_path_depth"
+                ]
+            )
+            sapien.render.set_ray_tracing_samples_per_pixel(
+                viewer_camera_config.shader_config.shader_pack_config[
+                    "ray_tracing_samples_per_pixel"
+                ]
+            )
+        viewer = Viewer(
+            resolutions=(viewer_camera_config.width, viewer_camera_config.height)
+        )
+        if sys.platform == "darwin": # macOS
+            viewer.window.set_content_scale(1)
+    elif SAPIEN_RENDER_SYSTEM == "3.1":
+        # TODO (stao): figure out how shader pack configs can be set at run time
+        viewer = Viewer(
+            resolutions=(viewer_camera_config.width, viewer_camera_config.height),
+            shader_pack=sapien.render.get_shader_pack(
+                viewer_camera_config.shader_config.shader_pack
+            ),
+        )
+
+    return viewer
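
Since mani_skill/viewer/__init__.py is deleted in this release (see the removal further below), code that imported create_viewer from there should switch to the new location:

    from mani_skill.utils.sapien_utils import create_viewer
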
File without changes
File without changes
mani_skill/utils/structs/base.py
@@ -206,9 +206,6 @@ class PhysxRigidBodyComponentStruct(PhysxRigidBaseComponentStruct[T], Generic[T]
     @property
     def angular_velocity(self) -> torch.Tensor:
         if self.scene.gpu_sim_enabled:
-            # NOTE (stao): Currently physx has a bug that sapien inherits where link bodies on the GPU put linear/angular velocities in the wrong order...
-            if isinstance(self._objs[0], physx.PhysxArticulationLinkComponent):
-                return self._body_data[self._body_data_index, 7:10]
             return self._body_data[self._body_data_index, 10:13]
         else:
             return torch.tensor(
@@ -263,9 +260,8 @@ class PhysxRigidBodyComponentStruct(PhysxRigidBaseComponentStruct[T], Generic[T]
     @property
     def linear_velocity(self) -> torch.Tensor:
         if self.scene.gpu_sim_enabled:
-            # NOTE (stao): Currently physx has a bug that sapien inherits where link bodies on the GPU put linear/angular velocities in the wrong order...
-            if isinstance(self._objs[0], physx.PhysxArticulationLinkComponent):
-                return self._body_data[self._body_data_index, 10:13]
+            # NOTE (stao): SAPIEN version 3.0.0b1 gpu sim has a bug inherited from physx where linear/angular velocities are in the wrong order
+            # for link entities, namely 7:10 was angular velocity and 10:13 was linear velocity. SAPIEN 3.0.0 and above fixes this
             return self._body_data[self._body_data_index, 7:10]
         else:
             return torch.from_numpy(self._bodies[0].linear_velocity[None, :])
@@ -358,9 +354,6 @@ class PhysxRigidDynamicComponentStruct(PhysxRigidBodyComponentStruct[T], Generic
     @property
     def angular_velocity(self) -> torch.Tensor:
         if self.scene.gpu_sim_enabled:
-            # NOTE (stao): Currently physx has a bug that sapien inherits where link bodies on the GPU put linear/angular velocities in the wrong order...
-            if isinstance(self._objs[0], physx.PhysxArticulationLinkComponent):
-                return self._body_data[self._body_data_index, 7:10]
             return self._body_data[self._body_data_index, 10:13]
         else:
             return torch.from_numpy(self._bodies[0].angular_velocity[None, :])
@@ -429,9 +422,6 @@ class PhysxRigidDynamicComponentStruct(PhysxRigidBodyComponentStruct[T], Generic
     @property
     def linear_velocity(self) -> torch.Tensor:
         if self.scene.gpu_sim_enabled:
-            # NOTE (stao): Currently physx has a bug that sapien inherits where link bodies on the GPU put linear/angular velocities in the wrong order...
-            if isinstance(self._objs[0], physx.PhysxArticulationLinkComponent):
-                return self._body_data[self._body_data_index, 10:13]
             return self._body_data[self._body_data_index, 7:10]
         else:
             return torch.tensor(
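
These struct properties are what Actor and Link objects expose to task code. A hedged sketch of reading them in a GPU-parallel task (cube is assumed to be an Actor built during scene loading; it is not part of this diff):

    def cube_speed(cube):
        # on GPU sim these are (num_envs, 3) torch tensors
        lin = cube.linear_velocity
        ang = cube.angular_velocity
        return lin.norm(dim=-1), ang.norm(dim=-1)
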
mani_skill/utils/structs/types.py
@@ -27,6 +27,8 @@ class GPUMemoryConfig:
     ) # 262144 is SAPIEN default but most tasks work with 2**25
     found_lost_aggregate_pairs_capacity: int = 2**10
     total_aggregate_pairs_capacity: int = 2**10
+    collision_stack_size: int = 64 * 64 * 1024 # this is the same default as SAPIEN
+    """Increase this if you get 'Collision stack overflow detected'"""
 
     def dict(self):
         return {k: v for k, v in asdict(self).items()}
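
BaseEnv merges user sim_config overrides into these defaults (see the sapien_env.py hunk above), so the new field can be raised when the GPU sim reports a collision stack overflow. A hedged sketch (the task id is illustrative; the kwargs follow the usual gym.make(..., sim_config=...) override pattern):

    import gymnasium as gym

    env = gym.make(
        "PickCube-v1",  # illustrative task id
        num_envs=1024,
        sim_config=dict(
            gpu_memory_config=dict(collision_stack_size=4 * 64 * 64 * 1024),
        ),
    )
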
mani_skill/vector/wrappers/gymnasium.py
@@ -11,6 +11,7 @@ from mani_skill.utils.structs.types import Array
 
 if TYPE_CHECKING:
     from gymnasium import Env
+
     from mani_skill.envs.sapien_env import BaseEnv
 
 
@@ -18,8 +19,6 @@ class ManiSkillVectorEnv(VectorEnv):
     """
     Gymnasium Vector Env implementation for ManiSkill environments running on the GPU for parallel simulation and optionally parallel rendering
 
-    Note that currently this also assumes modeling tasks as infinite horizon (e.g. terminations is always False, only reset when timelimit is reached)
-
    Args:
        env: The environment created via gym.make / after wrappers are applied. If a string is given, we use gym.make(env) to create an environment
        num_envs: The number of parallel environments. This is only used if the env argument is a string
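
With the infinite-horizon caveat removed, terminations reported by the task flow through the wrapper. A hedged sketch of typical usage (the kwargs mirror the common ManiSkill RL examples and are assumptions, not something this diff specifies):

    import gymnasium as gym
    from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv

    envs = gym.make("PickCube-v1", num_envs=512, obs_mode="state")
    envs = ManiSkillVectorEnv(envs, auto_reset=True, ignore_terminations=False)
    obs, info = envs.reset()
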
{mani_skill_nightly-2025.7.25.606.dist-info → mani_skill_nightly-2025.8.1.229.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mani-skill-nightly
-Version: 2025.7.25.606
+Version: 2025.8.1.229
 Summary: ManiSkill3: A Unified Benchmark for Generalizable Manipulation Skills
 Home-page: https://github.com/haosulab/ManiSkill
 Author: ManiSkill contributors
@@ -27,7 +27,7 @@ Requires-Dist: tyro>=0.8.5
 Requires-Dist: huggingface-hub
 Requires-Dist: mplib==0.1.1; platform_system == "Linux"
 Requires-Dist: fast-kinematics==0.2.2; platform_system == "Linux"
-Requires-Dist: sapien>=3.0.0.b1; platform_system == "Linux"
+Requires-Dist: sapien>=3.0.0; platform_system == "Linux"
 Requires-Dist: sapien>=3.0.0.b1; platform_system == "Windows"
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
@@ -52,6 +52,7 @@ Requires-Dist: sphinx-subfigure; extra == "docs"
 Requires-Dist: sphinxcontrib-video; extra == "docs"
 Requires-Dist: sphinx-togglebutton; extra == "docs"
 Requires-Dist: sphinx-design; extra == "docs"
+Requires-Dist: sphinx-autoapi; extra == "docs"
 
 # ManiSkill 3 (Beta)
 
@@ -1,5 +1,5 @@
1
1
  mani_skill/__init__.py,sha256=_wZjMWSlWZYeAAEjiHAHa5W6uq7Eh4fbny8HwlYSJhQ,2101
2
- mani_skill/agents/__init__.py,sha256=6u7nUfWDqWmD_ieNhECfhV6mIyf4SmMdumexE2lRlsU,44
2
+ mani_skill/agents/__init__.py,sha256=87Xa-eaPbbVm6LmD1111yXSBuRkcoTG0g_5RdkaFiGI,130
3
3
  mani_skill/agents/base_agent.py,sha256=pnaE1VWJSpdzlsFPmcG5VVLLX9sWtPnl1ZM3ZIy2kXM,18400
4
4
  mani_skill/agents/base_real_agent.py,sha256=DD9SXZa7er5zF7wdm97IO_0W4xbdw-66iLC8j86ykYw,8549
5
5
  mani_skill/agents/multi_agent.py,sha256=AFygr2797M5Hhk4qMoLuFmFk7msqnp82bFuSyP1j8JA,3341
@@ -539,7 +539,7 @@ mani_skill/assets/robots/xarm7/meshes/visual/link7.glb,sha256=aZatACOv20VJbi2tOE
539
539
  mani_skill/assets/robots/xarm7/meshes/visual/link_base.glb,sha256=vcy2lN1V72jIsSDRT0ZKVskR_0pVOXtDvBkxO2GENWs,467668
540
540
  mani_skill/envs/__init__.py,sha256=YPlttBErTcf9vSnkZ54EQ8vTABSfFFrBdUY0AkF4vmg,43
541
541
  mani_skill/envs/minimal_template.py,sha256=9THHWA1vkHatptc9g5Ojh-UBUKWQmLHVeq4fcaqv2aY,2200
542
- mani_skill/envs/sapien_env.py,sha256=-reQWgDhCQga2RqFovQlHrsAKxxnvpcYYtK1AqxPEyI,75338
542
+ mani_skill/envs/sapien_env.py,sha256=uaRiPL6srGB3ZAGC791HiT8RYmDYhBxvOuDy7dwGags,75682
543
543
  mani_skill/envs/scene.py,sha256=4ZAIJs61fwPPhfDvc3v845sj_Ftsd1sSYaju10KnXbQ,48465
544
544
  mani_skill/envs/sim2real_env.py,sha256=3mkQX4TonE2pUC5_Atmx0IYDH2_v6GSwOPJvQMEvCNY,19214
545
545
  mani_skill/envs/template.py,sha256=0wnwKjnGOF7RvTR5Gz4VopaUiFxnIioXwmb4nPVxAs8,11939
@@ -570,7 +570,7 @@ mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/base_env.py,sha256=L7eQ8
570
570
  mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/put_on_in_scene.py,sha256=kDKEX-e1Hq5kZiBs9xfEzU2jjx35eHWriAAYFJcBbgE,9419
571
571
  mani_skill/envs/tasks/digital_twins/so100_arm/__init__.py,sha256=uehHSCaHoZDB9awMtF81r4A-X4fM6NFmErzOsgzgSs4,42
572
572
  mani_skill/envs/tasks/digital_twins/so100_arm/grasp_cube.py,sha256=Uv2wTMUyfh8ygG4g6WfPj5E8jaDUyBZ789lPPCRBJjM,22386
573
- mani_skill/envs/tasks/drawing/__init__.py,sha256=eZ7CnzIWBOHUihvOKStSQK94eKSv5wchRdSodiDYBlw,72
573
+ mani_skill/envs/tasks/drawing/__init__.py,sha256=b2HaUu5UwcWHetjcn6hKlKFokUMd1ZGIljOg-kPeo54,114
574
574
  mani_skill/envs/tasks/drawing/draw.py,sha256=WlhxJjt0-DlkxC3t-o0M8BoOwdwWpM9reupFg5OqiZc,8146
575
575
  mani_skill/envs/tasks/drawing/draw_svg.py,sha256=UKnVl_tMfUNHmOg24Ny-wFynb5CaZC0uIHvW9sBxbyo,16206
576
576
  mani_skill/envs/tasks/drawing/draw_triangle.py,sha256=aOSc37tHZrU_96W_Pj0mNpxOTopHyMvCVps-gihE0qc,15528
@@ -585,7 +585,7 @@ mani_skill/envs/tasks/fmb/assets/purple_u.glb,sha256=lQZAv2qipX0QpYLyQuVWNKUFs8n
585
585
  mani_skill/envs/tasks/fmb/assets/purple_u.ply,sha256=KwY2eUKF5Ci5cQ9aYUVAulO3RyRZ5AxhZZcbsaZoXTE,807
586
586
  mani_skill/envs/tasks/fmb/assets/reorienting_fixture.glb,sha256=n8_3KXNqa--hmCLhaK2d5kwM0ujTnt0hD5HLVC1_0xs,8764
587
587
  mani_skill/envs/tasks/fmb/assets/yellow_peg.glb,sha256=QgMb8lGcpQh_-BLweRhYZ5gI3V4LjueZvZm3yVBTacE,1700
588
- mani_skill/envs/tasks/humanoid/__init__.py,sha256=sU7DtnY15uZLrthoGFo3KOFvISjkIhobzByI7szdv9I,123
588
+ mani_skill/envs/tasks/humanoid/__init__.py,sha256=18pnVy_csZjYmV2IbvMTGg3UjiTxI9anm1bDAnM2hmo,146
589
589
  mani_skill/envs/tasks/humanoid/humanoid_pick_place.py,sha256=k8OyBB2wnXuFHrBgp-YMIb5TDtBHIgZHkimRckMv-eo,10692
590
590
  mani_skill/envs/tasks/humanoid/humanoid_stand.py,sha256=azpTPczHIiyECYH672qzp2RtcAFCsuUjxF2o5gMbK2U,4808
591
591
  mani_skill/envs/tasks/humanoid/transport_box.py,sha256=w9E41h1h7gLKx9PE0a6vtXkSo_9GgYnr03hChdpFD2w,12260
@@ -637,11 +637,8 @@ mani_skill/envs/utils/randomization/pose.py,sha256=9PPg-QMorHVe3fV4e3T-BRYu0E_8I
637
637
  mani_skill/envs/utils/randomization/samplers.py,sha256=EOkF18mmDC7fA2hgj8QC2Ag0gnf4H-4MIjOCDvTMpCE,3665
638
638
  mani_skill/envs/utils/rewards/__init__.py,sha256=66MV5YCbnpF4ac_SvTVJ000RxM1AIsclX7OatiA-Wak,22
639
639
  mani_skill/envs/utils/rewards/common.py,sha256=1lfJNWG3x7UjarHLteLXa8DCzbe_L7nYBMOBo5D9CRQ,3647
640
+ mani_skill/envs/utils/system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
640
641
  mani_skill/envs/utils/system/backend.py,sha256=9xk-ef8779Cj27THXkVM-sxEMQWH8bUrWiwgaf-MKCo,3309
641
- mani_skill/evaluation/__init__.py,sha256=PCMN6dc9zgFKuXTPmkAUEahP3XG65cKRLGMXrk5IeXY,33
642
- mani_skill/evaluation/evaluator.py,sha256=1EN6qAiGx3taB4vCeArUp33UZjLvBt1Ke6iUW8Z8aV0,4493
643
- mani_skill/evaluation/run_evaluation.py,sha256=yorphrlJKEGycHfQS8equnJHRsyjDuv77ZGNpg9wvCs,4780
644
- mani_skill/evaluation/solution.py,sha256=e_Aa0f4sSQ56KXL7tVDPUKf7WTjcuFc5X4J76p884Zs,1269
645
642
  mani_skill/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
646
643
  mani_skill/examples/demo_manual_control.py,sha256=Z17ER37oehS8VgtDO_4dwiy5jDgL93nT9IdCsNDf0Es,8275
647
644
  mani_skill/examples/demo_manual_control_continuous.py,sha256=tnCnKX2v1iIhtXwvWR2NzXgpf3e0y2-qAO91jJBLIO0,9679
@@ -706,33 +703,32 @@ mani_skill/trajectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
706
703
  mani_skill/trajectory/dataset.py,sha256=nrG3jkhdzRqAdjxC_c8Z4FxpkvW3A9XPvUp9-Ux_u38,6351
707
704
  mani_skill/trajectory/merge_trajectory.py,sha256=zsjRMTsiIirZGIV4KrtYOM2-zoOAzd7ObZEdWGJzZbE,3685
708
705
  mani_skill/trajectory/replay_trajectory.py,sha256=ABiM4pMSkTAhU1L2fdaY-Mwnw2Hzg8p1rAaWf3ijWOE,27681
709
- mani_skill/trajectory/utils/__init__.py,sha256=Nchv09IpXv0FOgpf7Ng1Ekus6ZfAh3kI0KJs-79QOig,1515
706
+ mani_skill/trajectory/utils/__init__.py,sha256=-Efv2GEzTnFHd3SxqQtaZLaMRGrCc-P1ClmgLhoV4gs,1465
710
707
  mani_skill/trajectory/utils/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
711
708
  mani_skill/trajectory/utils/actions/conversion.py,sha256=x88C64ke44gB-HEbqq_gSRFv34L7irSwT_wYttkQUn8,12922
712
709
  mani_skill/utils/README.md,sha256=A2UG1u5-4LLi-Fkc4lCcEBjSHwxY0xst2Xtvyw4q-0c,1519
713
710
  mani_skill/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
714
711
  mani_skill/utils/common.py,sha256=PYzgQRpTFnqKj2brD0mfGuOICLPsruEDXqs9D7u8Trg,13118
715
712
  mani_skill/utils/download_asset.py,sha256=vP7dsho4Fc-b-Ydm9vj6opFmORa-2Svd69w1izuk5go,8060
716
- mani_skill/utils/download_demo.py,sha256=ndnnc1TfyuLVb-odrA50HvuZemXZf-zZlqyFTinx-mA,4467
713
+ mani_skill/utils/download_demo.py,sha256=Hu-JSkxQpsoLhxZeM-m2NIeDiBUA2YMoA6cJH0HrVaE,4454
717
714
  mani_skill/utils/gym_utils.py,sha256=rAYNyxJtF1bSghi_EDPlqgnYZQwG0RklobgaPztSEvw,5922
718
715
  mani_skill/utils/io_utils.py,sha256=VJ93qoHxzFWIljAp9043pJqETmEGmxzt_DoYtE5Hd_I,1615
719
716
  mani_skill/utils/logging_utils.py,sha256=Iomnw2VrJ56A6kvyIm9Zhgp94Qf-WOf9WuMAGNB-2fc,1765
720
- mani_skill/utils/precompile_mpm.py,sha256=D497m_0mcNifeBEV7wmG5gHU_SstNpOoHIb-OQiqvUA,291
721
717
  mani_skill/utils/registration.py,sha256=u8ftfGvQP4qzlKNqLQjGi3cRF_-h6Rz-28xbLkW_YJ4,9718
722
- mani_skill/utils/sapien_utils.py,sha256=QMV0jRZO51KzIMB5CVW_Ne-4fPw0-mqM4a3yhNZaMYo,16430
718
+ mani_skill/utils/sapien_utils.py,sha256=bUo4jFvWbxjJxXY-cjcR7Q2ZAhAcArlPaElYI2f3RUI,18025
723
719
  mani_skill/utils/tree.py,sha256=jFyqCqVkarR6AaTDissVo33nOfbPfkZp8CsqEQmCV9Q,515
724
720
  mani_skill/utils/assets/README.md,sha256=5kkmsIiV64ySEGO34HaAlpjXTyrGs1KTV5WnofK46G0,70
725
721
  mani_skill/utils/assets/__init__.py,sha256=gQVKwAczcImTXArSltBWKlSUUuguO12sZYO3Jh5KLso,159
726
722
  mani_skill/utils/assets/data.py,sha256=xEuibRoEPBDN_vEU-MM5UWf6VDb1omE6BfZKPvlMPdI,8807
727
723
  mani_skill/utils/building/__init__.py,sha256=quCI5WYGhzGLMVg_NDyYv2G_MxRTBL8R6XD4a6iY8qc,218
728
724
  mani_skill/utils/building/_mjcf_loader.py,sha256=SqzSoRootFvItHrzwrDuSHScePxbaPqWb7262M7HzIU,37011
729
- mani_skill/utils/building/actor_builder.py,sha256=BKfGpqsvwE1nTQqpMUiq36NQyLB6mga0OqYYnwR0fWw,14971
725
+ mani_skill/utils/building/actor_builder.py,sha256=WHaJKmN9FsIOPjUYnRrOsaD5jhWN_EnncOPOxE1lz-c,15006
730
726
  mani_skill/utils/building/articulation_builder.py,sha256=ubRJYjINo7XTQ9IfE45Ie3CZGl79rmzYi_Kpq86Czrs,8492
731
727
  mani_skill/utils/building/ground.py,sha256=YaVt9xQfsqG0GRjKWe0o1e7pdPzmN-PN8FQOdzdargU,4365
732
728
  mani_skill/utils/building/mjcf_loader.py,sha256=FY--8z4JWjDHCM6XYLSC8W14MHVywV0SzOTiHzWafbs,4452
733
729
  mani_skill/utils/building/urdf_loader.py,sha256=llYoiRDU5gTi7sgi8Fv-Zjwdo7SoLpmtG4gJJEpn85g,4905
734
- mani_skill/utils/building/actors/__init__.py,sha256=G7aFAkb2f7c6hUcljJ17GIvlzUzVlpIOg6AZkTWFbDg,1420
735
- mani_skill/utils/building/actors/common.py,sha256=E2T6fUxAh5It3welkTkMQ_ptkDq8dh06-klJCtagK80,9290
730
+ mani_skill/utils/building/actors/__init__.py,sha256=2jOgWQV_spgIfZIKHPEf4FFOvBISYVUadumqWHCZDP8,218
731
+ mani_skill/utils/building/actors/common.py,sha256=MxTb8nDqNFVcAJQn3SoLf8piltE5cuavArGG5VlxxNA,10586
736
732
  mani_skill/utils/building/actors/ycb.py,sha256=6iHSpHicdfXlk7RmVEthUEzGStfj6cSGOHoHSc37Pjc,1347
737
733
  mani_skill/utils/building/articulations/__init__.py,sha256=jC8rbOFzLnMkC0O-Je2RDMQlDdu3Muw8P9gbnTgLHU4,1147
738
734
  mani_skill/utils/building/articulations/partnet_mobility.py,sha256=gVlAVFOpaqE3HKUNJ2DYQTYySldK39ysPCfxPKIXyb0,2455
@@ -757,6 +753,8 @@ mani_skill/utils/scene_builder/ai2thor/metadata/ArchitecTHOR.json,sha256=mbPJWHd
757
753
  mani_skill/utils/scene_builder/ai2thor/metadata/ProcTHOR.json,sha256=OYnCKrlYdbChbF2cSfNUeI0GH2z-X6ZkQtoskRAaqtQ,605697
758
754
  mani_skill/utils/scene_builder/ai2thor/metadata/RoboTHOR.json,sha256=v92NpbBGBMisvu1dA382vML-OBp2spTj94Y71QffgbQ,3612
759
755
  mani_skill/utils/scene_builder/ai2thor/metadata/iTHOR.json,sha256=wAvoEM_JC2EpaegMs5hv9hbRQ-G4CC78_loeEh4jQZc,7488
756
+ mani_skill/utils/scene_builder/control/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
757
+ mani_skill/utils/scene_builder/control/planar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
760
758
  mani_skill/utils/scene_builder/control/planar/scene_builder.py,sha256=9zG6SLx7aySYcru73lpK-Y8X5IvFiWYiV4M9RSFIPSY,1075
761
759
  mani_skill/utils/scene_builder/kitchen_counter/__init__.py,sha256=TFMKaUyPqRlIJyWfvs_POQHdiqWwdM-O-ssZjkxhHBA,54
762
760
  mani_skill/utils/scene_builder/kitchen_counter/scene_builder.py,sha256=bbI5c4ax9YtFLUTMD86HAnqdm9oSUN49b-5FROx-NP0,1464
@@ -768,6 +766,7 @@ mani_skill/utils/scene_builder/replicacad/metadata/scene_configs.json,sha256=DIm
768
766
  mani_skill/utils/scene_builder/replicacad/rearrange/__init__.py,sha256=6wrAvNhnpkXOSYACdzTe4FveGDfycCINEHbTUbHRszE,284
769
767
  mani_skill/utils/scene_builder/replicacad/rearrange/scene_builder.py,sha256=G1bqkiIoKrUXWbxxZqVDwLzJM-NcXnvct6PcWlRiBv8,17469
770
768
  mani_skill/utils/scene_builder/replicacad/rearrange/variants.py,sha256=x5bn_aE-uReI_ZRHUzQBMlDzhKWSS1w2tSiHosF8BAw,1170
769
+ mani_skill/utils/scene_builder/robocasa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
771
770
  mani_skill/utils/scene_builder/robocasa/scene_builder.py,sha256=1Dc4fZ5l4WKNAc-qYsfDnOGC1qW3C2HjJ6mTCHYqO9o,40749
772
771
  mani_skill/utils/scene_builder/robocasa/fixtures/accessories.py,sha256=SjG_r7coAKRJ7WttUP-bfSwuz4sZUX-4z8D28xhdYQE,7857
773
772
  mani_skill/utils/scene_builder/robocasa/fixtures/cabinet.py,sha256=TwG625RjTaM_DqTxxjyvYxPnPnoJsUcTpV8zEtJvw8A,46314
@@ -786,7 +785,6 @@ mani_skill/utils/scene_builder/robocasa/fixtures/others.py,sha256=PP1_r66uckZK1v
786
785
  mani_skill/utils/scene_builder/robocasa/fixtures/sink.py,sha256=-ci0XTadmyZ2iIWq4ckk7J__YxerScSvvRq-bpSY5tM,4274
787
786
  mani_skill/utils/scene_builder/robocasa/fixtures/stove.py,sha256=aQ7PpZ3RVYTpj0OxeA6hTFEm8uY-Vp9APQHHExZmS4c,6871
788
787
  mani_skill/utils/scene_builder/robocasa/fixtures/windows.py,sha256=WiVaFjZ6JTjb6Zy2BMpffsRXC2WEPkMzhCNSXY5FjW8,12953
789
- mani_skill/utils/scene_builder/robocasa/objects/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
790
788
  mani_skill/utils/scene_builder/robocasa/objects/kitchen_object_utils.py,sha256=Y-9SjooCfr76Rt_T6U1-57pCDQg-aaPI0jNJ5rLHIo8,14459
791
789
  mani_skill/utils/scene_builder/robocasa/objects/kitchen_objects.py,sha256=qjiON5GEXN3SCtgN2vZKmOe5bv3H-CMnIrPD926Rh4A,48332
792
790
  mani_skill/utils/scene_builder/robocasa/objects/objects.py,sha256=zVhcvuOSmm7bYMOxUgPdCErwXFaRjXHJCC1ohA2L4tc,3581
@@ -804,13 +802,13 @@ mani_skill/utils/structs/__init__.py,sha256=BItR3Xe0z6xCrMHAEaH0AAAVyeonsQ3q-DJU
804
802
  mani_skill/utils/structs/actor.py,sha256=L0p6vkr8rGtJmF22xAq8Q7nhXKnDD5dahzODSAko0bg,17394
805
803
  mani_skill/utils/structs/articulation.py,sha256=QvWQsHdgCUCiGHVfUzZOeEqJ_OQsbi7CrxkCgjy3wyM,38491
806
804
  mani_skill/utils/structs/articulation_joint.py,sha256=xDQkCAXM3XZ56YgFqLwH5Ec8aFqhR5BqMSvDYCS0bzw,12972
807
- mani_skill/utils/structs/base.py,sha256=meGQK5Y4KtHKLnp9VeOZS2gtwg9tE55whuEeqOguBaI,19465
805
+ mani_skill/utils/structs/base.py,sha256=R3L8s3acMUmzQTeP5iYISHp3CXVeJFg1Nml8wF0Zm40,18526
808
806
  mani_skill/utils/structs/decorators.py,sha256=Lv6wQ989dOnreo2tB-qopDnkeBp_jsn1pmfUR-OY8VQ,535
809
807
  mani_skill/utils/structs/drive.py,sha256=UPQDkGbXS-CMRsZ1MHCb9s1vfAo5nqsywF83wKBVzSY,7505
810
808
  mani_skill/utils/structs/link.py,sha256=Syq2_PSwmQGj1KOkmRjmDETIus_SR6qXorc2mnsDq38,13792
811
809
  mani_skill/utils/structs/pose.py,sha256=76Sjrs-y3f8YhnuqMZNih-NxcnhnfomIbnNW1SWqK6A,11938
812
810
  mani_skill/utils/structs/render_camera.py,sha256=cNdi_DMsrHDqO-vHjwEIMVFxVvPHNTmVZe0sCQ1XMbI,12599
813
- mani_skill/utils/structs/types.py,sha256=eDezQSxtu8IyjIe4k6m8amQn7eYIuP9iLatXbr7FYH8,3810
811
+ mani_skill/utils/structs/types.py,sha256=XnrIjvv-W41iNDVkCzXdHXa7SogfZMLaKozK213zSJ8,3966
814
812
  mani_skill/utils/visualization/UbuntuSansMono-Regular.ttf,sha256=y_4ls2KucNhIA6CDNgSSAjUxGGSvLQqRrzacKfdpW-0,118780
815
813
  mani_skill/utils/visualization/__init__.py,sha256=0QF97UR8d7poMHo6m52DsAUXAmUb3SDr1B21bx33EEU,163
816
814
  mani_skill/utils/visualization/jupyter_utils.py,sha256=dXXUQz-rFTOV_Xq5yA6YE6cXg7DPw15YStw37NgB5Qc,1322
@@ -826,11 +824,10 @@ mani_skill/utils/wrappers/record.py,sha256=73g-dvnFrXcFy7t2BksgH8UqBUVBZG8BbCNPE
826
824
  mani_skill/utils/wrappers/visual_encoders.py,sha256=ISLO5ceaRkINhvce92VuZMDMCU3I4F7cQWFW2aVP-14,2205
827
825
  mani_skill/vector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
828
826
  mani_skill/vector/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
829
- mani_skill/vector/wrappers/gymnasium.py,sha256=SbFD9inb_a8Hm36ckpdZumxVRfVcw5l1-nfyWGF0KfA,7456
827
+ mani_skill/vector/wrappers/gymnasium.py,sha256=voHNmYg5Jyy-laMSC2Fd8VggQvhXw3NnfYLbD9QDXAc,7305
830
828
  mani_skill/vector/wrappers/sb3.py,sha256=SlXdiEPqcNHYMhJCzA29kBU6zK7DKTe1nc0L6Z3QQtY,4722
831
- mani_skill/viewer/__init__.py,sha256=srvDBsk4LQU75K2VIttrhiQ68p_ro7PSDqQRls2PY5c,1722
832
- mani_skill_nightly-2025.7.25.606.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
833
- mani_skill_nightly-2025.7.25.606.dist-info/METADATA,sha256=Fz68W0vbVEG-GCrKasNzdf8kz8wMbUYdJnzsWDJCKFY,9271
834
- mani_skill_nightly-2025.7.25.606.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
835
- mani_skill_nightly-2025.7.25.606.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
836
- mani_skill_nightly-2025.7.25.606.dist-info/RECORD,,
829
+ mani_skill_nightly-2025.8.1.229.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
830
+ mani_skill_nightly-2025.8.1.229.dist-info/METADATA,sha256=hvtmxCMssOwGhFzznHugJZwYMCMvtyPz0X-XTYMxFd4,9314
831
+ mani_skill_nightly-2025.8.1.229.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
832
+ mani_skill_nightly-2025.8.1.229.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
833
+ mani_skill_nightly-2025.8.1.229.dist-info/RECORD,,
mani_skill/evaluation/__init__.py
@@ -1 +0,0 @@
-from .solution import BasePolicy
@@ -1,129 +0,0 @@
1
- from typing import Callable, List, Type
2
-
3
- import gymnasium as gym
4
- import numpy as np
5
-
6
- from mani_skill.envs.sapien_env import BaseEnv
7
- from mani_skill.utils import common, gym_utils
8
-
9
- from .solution import BasePolicy
10
-
11
-
12
- class BaseEvaluator:
13
- env: gym.Env
14
- policy: BasePolicy
15
-
16
- MAX_EPISODE_STEPS = 1000
17
-
18
- def setup(
19
- self,
20
- env_id: str,
21
- policy_cls: Type[BasePolicy],
22
- render_mode="cameras",
23
- env_kwargs=None,
24
- ):
25
- """Setup environment and policy."""
26
- self.env_id = env_id
27
- self.env_kwargs = {} if env_kwargs is None else env_kwargs
28
-
29
- obs_mode = policy_cls.get_obs_mode(env_id)
30
- control_mode = policy_cls.get_control_mode(env_id)
31
-
32
- self.env: BaseEnv = gym.make(
33
- self.env_id,
34
- obs_mode=obs_mode,
35
- control_mode=control_mode,
36
- render_mode=render_mode,
37
- **self.env_kwargs
38
- )
39
- self.policy = policy_cls(
40
- self.env_id, self.env.observation_space, self.env.action_space
41
- )
42
- self.result = dict()
43
-
44
- def evaluate_episode(self, reset_kwargs, render=False):
45
- """Evaluate a single episode."""
46
- env = self.env
47
- policy = self.policy
48
-
49
- obs, _ = env.reset(**reset_kwargs)
50
- policy.reset(obs)
51
- # NOTE(jigu): Use for-loop rather than while-loop
52
- # in case time limit is not correctly set.
53
- for _ in range(self.MAX_EPISODE_STEPS):
54
- action = policy.act(obs)
55
- # NOTE(jigu): render after action in case action is needed to visualize
56
- if render:
57
- env.render()
58
- obs, reward, terminated, truncated, info = env.step(action)
59
- if terminated or truncated:
60
- if render:
61
- env.render()
62
- assert "success" in info, sorted(info.keys())
63
- metrics = gym_utils.extract_scalars_from_info(
64
- info, "TimeLimit.truncated"
65
- )
66
- return metrics
67
-
68
- def evaluate_episodes(self, episode_cfgs: List[dict], callback: Callable = None):
69
- """Evaluate episodes according to configurations.
70
-
71
- Args:
72
- episode_cfgs (List[dict]): a list of episode configurations.
73
- The configuration should contain "reset_kwargs".
74
- callback (Callable, optional): callback function to report progress.
75
- It accepts two arguments:
76
- int: the number of completed episodes
77
- dict: the results of the latest evaluated episode
78
- """
79
- for i, episode_cfg in enumerate(episode_cfgs):
80
- episode_id = episode_cfg.get("episode_id", i)
81
- reset_kwargs = episode_cfg.get("reset_kwargs", {})
82
- metrics = self.evaluate_episode(reset_kwargs)
83
- if metrics is None:
84
- raise RuntimeError(
85
- "Episode {}: check whether time limit is set".format(episode_id)
86
- )
87
- if episode_id in self.result:
88
- raise RuntimeError("Episode id {} is not unique.".format(episode_id))
89
- self.result[episode_id] = metrics
90
-
91
- if callback is not None:
92
- callback(i + 1, metrics)
93
-
94
- def close(self):
95
- self.env.close()
96
-
97
- def generate_dummy_config(self, env_id, num_episodes: int):
98
- """Generate dummy configuration."""
99
- env_info = dict(env_id=env_id)
100
- episodes = [dict(episode_id=i) for i in range(num_episodes)]
101
- return dict(env_info=env_info, episodes=episodes)
102
-
103
- def merge_result(self):
104
- merged_result = common.merge_dicts(self.result.values())
105
- merged_metrics = {k: np.mean(v) for k, v in merged_result.items()}
106
- return merged_metrics
107
-
108
- def export_to_csv(self, path):
109
- """Average results and export to a csv file."""
110
- import csv
111
-
112
- import tabulate
113
-
114
- merged_metrics = self.merge_result()
115
- headers = ["env_id"] + list(merged_metrics.keys())
116
- data = [[self.env_id] + list(merged_metrics.values())]
117
- print(tabulate(data, headers=headers, tablefmt="psql", floatfmt=".4f"))
118
-
119
- with open(path, "w") as f:
120
- csv_writer = csv.writer(f)
121
- csv_writer.writerow(headers)
122
- csv_writer.writerows(data)
123
- print("The evaluation result is saved to {}.".format(path))
124
-
125
- def submit(self):
126
- raise NotImplementedError
127
-
128
- def error(self, *args, **kwargs):
129
- raise NotImplementedError
@@ -1,147 +0,0 @@
1
- import os
2
- import sys
3
-
4
- from tqdm import tqdm
5
-
6
- from mani_skill.evaluation.evaluator import BaseEvaluator
7
- from mani_skill.utils.io_utils import dump_json, load_json, write_txt
8
- from mani_skill.utils.wrappers import RecordEpisode
9
-
10
-
11
- class Evaluator(BaseEvaluator):
12
- """Local evaluation."""
13
-
14
- def __init__(self, output_dir: str, record_dir=None):
15
- if os.path.exists(output_dir):
16
- print(f"{output_dir} already exists.")
17
- os.makedirs(output_dir, exist_ok=True)
18
- self.output_dir = output_dir
19
-
20
- self.record_dir = record_dir
21
-
22
- def setup(self, *args, **kwargs):
23
- super().setup(*args, **kwargs)
24
- if self.record_dir is not None:
25
- self.env = RecordEpisode(self.env, self.record_dir, clean_on_close=False)
26
-
27
- def submit(self):
28
- # Export per-episode results
29
- json_path = os.path.join(self.output_dir, "episode_results.json")
30
- dump_json(json_path, self.result)
31
- print("The per-episode evaluation result is saved to {}.".format(json_path))
32
-
33
- # Export average result
34
- json_path = os.path.join(self.output_dir, "average_metrics.json")
35
- merged_metrics = self.merge_result()
36
- self.merged_metrics = merged_metrics
37
- dump_json(json_path, merged_metrics)
38
- print("The averaged evaluation result is saved to {}.".format(json_path))
39
-
40
- def error(self, *args):
41
- write_txt(os.path.join(self.output_dir, "error.log"), args)
42
-
43
-
44
- class TqdmCallback:
45
- def __init__(self, n: int):
46
- self.n = n
47
- self.pbar = tqdm(total=n)
48
-
49
- def __call__(self, i, metrics):
50
- self.pbar.update()
51
-
52
-
53
- def parse_args():
54
- import argparse
55
-
56
- parser = argparse.ArgumentParser()
57
- parser.add_argument(
58
- "-e", "--env-id", type=str, required=True, help="Environment ID"
59
- )
60
- parser.add_argument(
61
- "-o",
62
- "--output-dir",
63
- type=str,
64
- required=True,
65
- help="Directory to save evaluation results.",
66
- )
67
- parser.add_argument(
68
- "--config-file",
69
- type=str,
70
- help="Path to the config file. If None, use the dummy config.",
71
- )
72
- # For debug only
73
- parser.add_argument("-n", "--num-episodes", type=int, help="Number of episodes.")
74
- parser.add_argument(
75
- "--use-random-policy",
76
- action="store_true",
77
- help="Whether to use a random policy.",
78
- )
79
- parser.add_argument(
80
- "--record-dir",
81
- type=str,
82
- help="Directory to record videos and trajectories. If it is '@', use the output directory.",
83
- )
84
-
85
- args = parser.parse_args()
86
- return args
87
-
88
-
89
- def main():
90
- args = parse_args()
91
-
92
- if args.record_dir == "@":
93
- args.record_dir = args.output_dir
94
- evaluator = Evaluator(args.output_dir, record_dir=args.record_dir)
95
-
96
- # ---------------------------------------------------------------------------- #
97
- # Load evaluation configuration
98
- # ---------------------------------------------------------------------------- #
99
- try:
100
- if args.config_file is not None:
101
- config = load_json(args.config_file)
102
- config_env_id = config["env_info"]["env_id"]
103
- assert config_env_id == args.env_id, (config_env_id, args.env_id)
104
- else: # For debug
105
- config = evaluator.generate_dummy_config(args.env_id, args.num_episodes)
106
- except:
107
- exc_info = sys.exc_info()
108
- print("Fail to load evaluation configuration.", exc_info[:-1])
109
- evaluator.error("Fail to load evaluation configuration.", str(exc_info[0]))
110
- exit(1)
111
-
112
- # ---------------------------------------------------------------------------- #
113
- # Import user policy
114
- # ---------------------------------------------------------------------------- #
115
- if args.use_random_policy:
116
- from mani_skill.evaluation.solution import RandomPolicy
117
-
118
- UserPolicy = RandomPolicy
119
- else:
120
- try:
121
- from user_solution import UserPolicy
122
- except:
123
- exc_info = sys.exc_info()
124
- print("Fail to import UserPolicy", exc_info[:-1])
125
- evaluator.error("Fail to import UserPolicy", str(exc_info[0]))
126
- exit(2)
127
-
128
- # ---------------------------------------------------------------------------- #
129
- # Main
130
- # ---------------------------------------------------------------------------- #
131
- env_kwargs = config["env_info"].get("env_kwargs")
132
- evaluator.setup(
133
- args.env_id, UserPolicy, render_mode="cameras", env_kwargs=env_kwargs
134
- )
135
-
136
- episodes = config["episodes"]
137
- if args.num_episodes is not None:
138
- episodes = episodes[: args.num_episodes]
139
- cb = TqdmCallback(len(episodes))
140
- evaluator.evaluate_episodes(episodes, callback=cb)
141
-
142
- evaluator.submit()
143
- evaluator.close()
144
-
145
-
146
- if __name__ == "__main__":
147
- main()
@@ -1,42 +0,0 @@
1
- import numpy as np
2
- from gymnasium import spaces
3
-
4
-
5
- class BasePolicy:
6
- def __init__(
7
- self, env_id: str, observation_space: spaces.Space, action_space: spaces.Space
8
- ) -> None:
9
- self.env_id = env_id
10
- self.observation_space = observation_space
11
- self.action_space = action_space
12
- # NOTE(jigu): Do not assume that gym.make(env_id) works during evaluation
13
-
14
- def reset(self, observations):
15
- """Called at the beginning of an episode."""
16
-
17
- def act(self, observations) -> np.ndarray:
18
- """Act based on the observations."""
19
- raise NotImplementedError
20
-
21
- @classmethod
22
- def get_obs_mode(cls, env_id: str) -> str:
23
- """Get the observation mode for the policy. Define the observation space."""
24
- raise NotImplementedError
25
-
26
- @classmethod
27
- def get_control_mode(cls, env_id) -> str:
28
- """Get the control mode for the policy. Define the action space."""
29
- raise NotImplementedError
30
-
31
-
32
- class RandomPolicy(BasePolicy):
33
- def act(self, observations):
34
- return self.action_space.sample()
35
-
36
- @classmethod
37
- def get_obs_mode(cls, env_id: str) -> str:
38
- return "rgbd"
39
-
40
- @classmethod
41
- def get_control_mode(cls, env_id: str) -> str:
42
- return None # use default one
mani_skill/utils/precompile_mpm.py
@@ -1,13 +0,0 @@
-import gymnasium as gym
-
-from mani_skill.envs import mpm
-
-ENV_IDS = ["Excavate-v0", "Fill-v0", "Pour-v0", "Hang-v0", "Write-v0", "Pinch-v0"]
-
-
-if __name__ == "__main__":
-    for env_id in ENV_IDS:
-        env = gym.make(env_id)
-        env.reset()
-        env.step(None)
-        env.close()
@@ -1,46 +0,0 @@
1
- import sapien
2
- from sapien.utils import Viewer
3
- import sys
4
-
5
- from mani_skill.render import SAPIEN_RENDER_SYSTEM
6
- from mani_skill.sensors.camera import CameraConfig
7
-
8
-
9
- def create_viewer(viewer_camera_config: CameraConfig):
10
- """Creates a viewer with the given camera config"""
11
- if SAPIEN_RENDER_SYSTEM == "3.0":
12
- sapien.render.set_viewer_shader_dir(
13
- viewer_camera_config.shader_config.shader_pack
14
- )
15
- if viewer_camera_config.shader_config.shader_pack[:2] == "rt":
16
- sapien.render.set_ray_tracing_denoiser(
17
- viewer_camera_config.shader_config.shader_pack_config[
18
- "ray_tracing_denoiser"
19
- ]
20
- )
21
- sapien.render.set_ray_tracing_path_depth(
22
- viewer_camera_config.shader_config.shader_pack_config[
23
- "ray_tracing_path_depth"
24
- ]
25
- )
26
- sapien.render.set_ray_tracing_samples_per_pixel(
27
- viewer_camera_config.shader_config.shader_pack_config[
28
- "ray_tracing_samples_per_pixel"
29
- ]
30
- )
31
- viewer = Viewer(
32
- resolutions=(viewer_camera_config.width, viewer_camera_config.height)
33
- )
34
- if sys.platform == 'darwin': # macOS
35
- viewer.window.set_content_scale(1)
36
- elif SAPIEN_RENDER_SYSTEM == "3.1":
37
- # TODO (stao): figure out how shader pack configs can be set at run time
38
- viewer = Viewer(
39
- resolutions=(viewer_camera_config.width, viewer_camera_config.height),
40
- shader_pack=sapien.render.get_shader_pack(
41
- viewer_camera_config.shader_config.shader_pack
42
- ),
43
- )
44
-
45
-
46
- return viewer