mani-skill-nightly 2025.7.16.652__py3-none-any.whl → 2025.7.25.606__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mani_skill/agents/robots/xarm6/xarm6_robotiq.py +6 -1
- mani_skill/envs/sapien_env.py +30 -10
- mani_skill/utils/structs/articulation.py +4 -2
- mani_skill/utils/tree.py +24 -0
- mani_skill/utils/wrappers/__init__.py +2 -1
- mani_skill/utils/wrappers/cached_reset.py +151 -0
- mani_skill/utils/wrappers/record.py +3 -4
- mani_skill/vector/wrappers/gymnasium.py +2 -2
- {mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/METADATA +1 -1
- {mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/RECORD +13 -11
- {mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/LICENSE +0 -0
- {mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/WHEEL +0 -0
- {mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/top_level.txt +0 -0
mani_skill/agents/robots/xarm6/xarm6_robotiq.py
CHANGED

@@ -214,7 +214,7 @@ class XArm6Robotiq(BaseAgent):
         # -------------------------------------------------------------------------- #

         # Define a passive controller config to simply "turn off" other joints from being controlled and set their properties (damping/friction) to 0.
-        # These joints are controlled
+        # These joints are not explicitly controlled, and are free to move as per surrounding forces.
         passive_finger_joint_names = [
             "left_inner_knuckle_joint",
             "right_inner_knuckle_joint",
@@ -231,6 +231,9 @@ class XArm6Robotiq(BaseAgent):
         finger_joint_names = ["left_outer_knuckle_joint", "right_outer_knuckle_joint"]

         # Use a mimic controller config to define one action to control both fingers
+        mimic_config = dict(
+            left_outer_knuckle_joint=dict(joint="right_outer_knuckle_joint", multiplier=1.0, offset=0.0),
+        )
         finger_mimic_pd_joint_pos = PDJointPosMimicControllerConfig(
             finger_joint_names,
             lower=None,
@@ -240,6 +243,7 @@ class XArm6Robotiq(BaseAgent):
             force_limit=self.gripper_force_limit,
             friction=self.gripper_friction,
             normalize_action=False,
+            mimic=mimic_config,
         )

         finger_mimic_pd_joint_delta_pos = PDJointPosMimicControllerConfig(
@@ -252,6 +256,7 @@ class XArm6Robotiq(BaseAgent):
             friction=self.gripper_friction,
             normalize_action=True,
             use_delta=True,
+            mimic=mimic_config,
         )

         controller_configs = dict(
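The new mimic_config entry tells the mimic controller which joint follows which: left_outer_knuckle_joint tracks right_outer_knuckle_joint with a 1:1 ratio and no offset. A minimal sketch of the relationship such an entry encodes (the helper below is illustrative only, not part of the ManiSkill API):

def mimic_joint_target(driven_target: float, multiplier: float = 1.0, offset: float = 0.0) -> float:
    # The mimicking joint's target is an affine function of the driven joint's target.
    return multiplier * driven_target + offset

# With multiplier=1.0 and offset=0.0 the left knuckle simply mirrors the right one,
# so a single gripper action drives both fingers.
assert mimic_joint_target(0.3) == 0.3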
mani_skill/envs/sapien_env.py
CHANGED
@@ -37,7 +37,7 @@ from mani_skill.sensors.camera import (
     update_camera_configs_from_dict,
 )
 from mani_skill.sensors.depth_camera import StereoDepthCamera, StereoDepthCameraConfig
-from mani_skill.utils import common, gym_utils, sapien_utils
+from mani_skill.utils import common, gym_utils, sapien_utils, tree
 from mani_skill.utils.structs import Actor, Articulation
 from mani_skill.utils.structs.pose import Pose
 from mani_skill.utils.structs.types import Array, SimConfig
@@ -316,6 +316,8 @@ class BaseEnv(gym.Env):
         self._elapsed_steps = (
             torch.zeros(self.num_envs, device=self.device, dtype=torch.int32)
         )
+        self._last_obs = None
+        """the last observation returned by the environment"""
         obs, _ = self.reset(seed=[2022 + i for i in range(self.num_envs)], options=dict(reconfigure=True))

         self._init_raw_obs = common.to_cpu_tensor(obs)
@@ -850,7 +852,11 @@ class BaseEnv(gym.Env):
         options["reconfigure"] is True, will call self._reconfigure() which deletes the entire physx scene and reconstructs everything.
         Users building custom tasks generally do not need to override this function.

-
+        If options["reset_to_env_states"] is given, we expect there to be options["reset_to_env_states"]["env_states"] and optionally options["reset_to_env_states"]["obs"], both with
+        batch size equal to the number of environments being reset. "env_states" can be a dictionary or flat tensor and we skip calling the environment's _initialize_episode function which
+        generates the initial state on a normal reset. If "obs" is given we skip calling the environment's get_obs function which can save some compute/time.
+
+        Returns the observations and an info dictionary. The info dictionary is of type


         .. highlight:: python
@@ -917,12 +923,22 @@ class BaseEnv(gym.Env):
         if self.agent is not None:
             self.agent.reset()

-        if seed is not None or self._enhanced_determinism:
-            with torch.random.fork_rng():
-                torch.manual_seed(self._episode_seed[0])
-                self._initialize_episode(env_idx, options)
+        # we either reset to given env states or use the environment's defined _initialize_episode function to generate the initial state
+        reset_to_env_states_obs = None
+        if "reset_to_env_states" in options:
+            env_states = options["reset_to_env_states"]["env_states"]
+            reset_to_env_states_obs = options["reset_to_env_states"].get("obs", None)
+            if isinstance(env_states, dict):
+                self.set_state_dict(env_states, env_idx)
+            else:
+                self.set_state(env_states, env_idx)
         else:
-            self._initialize_episode(env_idx, options)
+            if seed is not None or self._enhanced_determinism:
+                with torch.random.fork_rng():
+                    torch.manual_seed(self._episode_seed[0])
+                    self._initialize_episode(env_idx, options)
+            else:
+                self._initialize_episode(env_idx, options)
         # reset the reset mask back to all ones so any internal code in maniskill can continue to manipulate all scenes at once as usual
         self.scene._reset_mask = torch.ones(
             self.num_envs, dtype=bool, device=self.device
@@ -942,9 +958,13 @@ class BaseEnv(gym.Env):
             self.agent.controller.reset()

         info = self.get_info()
-        obs = self.get_obs(info)
-
+        if reset_to_env_states_obs is None:
+            obs = self.get_obs(info)
+        else:
+            obs = self._last_obs
+            tree.replace(obs, env_idx, common.to_tensor(reset_to_env_states_obs, device=self.device))
         info["reconfigure"] = reconfigure
+        self._last_obs = obs
         return obs, info

     def _set_main_rng(self, seed):
@@ -1031,7 +1051,7 @@ class BaseEnv(gym.Env):
             terminated = info["fail"].clone()
         else:
             terminated = torch.zeros(self.num_envs, dtype=bool, device=self.device)
-
+        self._last_obs = obs
         return (
             obs,
             reward,
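Per the new docstring and reset logic above, a reset can now jump straight to saved environment states. A minimal usage sketch (the task id "PickCube-v1" and the choice to reuse the first reset's states are illustrative):

import gymnasium as gym
import mani_skill.envs  # registers ManiSkill environments

env = gym.make("PickCube-v1", num_envs=4)
obs, _ = env.reset(seed=0)
saved_states = env.unwrapped.get_state_dict()  # batched state dict covering all 4 envs

# Skips _initialize_episode; also passing "obs" alongside "env_states" would skip get_obs too.
obs, info = env.reset(options=dict(reset_to_env_states=dict(env_states=saved_states)))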
mani_skill/utils/structs/articulation.py
CHANGED

@@ -888,7 +888,7 @@ class Articulation(BaseStruct[physx.PhysxArticulation]):
             else:
                 gx, gy = self.get_joint_target_indices(joint_indices)
             self.px.cuda_articulation_target_qpos.torch()[
-                gx[self.scene._reset_mask], gy[self.scene._reset_mask]
+                gx[self.scene._reset_mask[self._scene_idxs]], gy[self.scene._reset_mask[self._scene_idxs]]
             ] = targets
         else:
             for i, joint in enumerate(joints):
@@ -911,7 +911,9 @@ class Articulation(BaseStruct[physx.PhysxArticulation]):
                 gx, gy = self.get_joint_target_indices(joints)
             else:
                 gx, gy = self.get_joint_target_indices(joint_indices)
-            self.px.cuda_articulation_target_qvel.torch()[
+            self.px.cuda_articulation_target_qvel.torch()[
+                gx[self.scene._reset_mask[self._scene_idxs]], gy[self.scene._reset_mask[self._scene_idxs]]
+            ] = targets
         else:
             for i, joint in enumerate(joints):
                 joint.set_drive_velocity_target(targets[0, i])
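The fix above indexes the scene-wide reset mask down to only the sub-scenes this articulation actually lives in, so the boolean mask lines up with the articulation's own batch dimension. A small illustrative sketch of that indexing (shapes are made up):

import torch

reset_mask = torch.tensor([True, False, True, False])  # scene._reset_mask: one flag per parallel sub-scene
scene_idxs = torch.tensor([1, 3])                       # sub-scenes that contain this articulation
per_articulation_mask = reset_mask[scene_idxs]          # tensor([False, False]), aligned with this articulation's batch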
mani_skill/utils/tree.py
ADDED
@@ -0,0 +1,24 @@
+import torch
+
+
+# NOTE (stao): when tensordict is used we should replace all of this
+def slice(x, i):
+    if isinstance(x, dict):
+        return {k: slice(v, i) for k, v in x.items()}
+    else:
+        return x[i]
+
+
+def cat(x: list):
+    if isinstance(x[0], dict):
+        return {k: cat([d[k] for d in x]) for k in x[0].keys()}
+    else:
+        return torch.cat(x, dim=0)
+
+
+def replace(x, i, y):
+    if isinstance(x, dict):
+        for k, v in x.items():
+            replace(v, i, y[k])
+    else:
+        x[i] = y
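A short usage sketch of these helpers on nested dicts of batched tensors (the shapes and keys below are illustrative):

import torch
from mani_skill.utils import tree

a = dict(qpos=torch.zeros((2, 7)), extra=dict(goal=torch.zeros((2, 3))))
b = dict(qpos=torch.ones((3, 7)), extra=dict(goal=torch.ones((3, 3))))

merged = tree.cat([a, b])                         # every leaf concatenated along dim 0 -> batch of 5
head = tree.slice(merged, slice(0, 2))            # same nesting, first 2 rows of every leaf
tree.replace(merged, torch.tensor([3, 4]), head)  # in place: rows 3 and 4 of every leaf overwritten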
mani_skill/utils/wrappers/__init__.py
CHANGED

@@ -1,3 +1,5 @@
+from .action_repeat import ActionRepeatWrapper
+from .cached_reset import CachedResetWrapper
 from .flatten import (
     FlattenActionSpaceWrapper,
     FlattenObservationWrapper,
@@ -6,4 +8,3 @@ from .flatten import (
 from .frame_stack import FrameStack
 from .gymnasium import CPUGymWrapper
 from .record import RecordEpisode
-from .action_repeat import ActionRepeatWrapper
mani_skill/utils/wrappers/cached_reset.py
ADDED

@@ -0,0 +1,151 @@
+from dataclasses import asdict, dataclass
+from typing import List, Optional, Union
+
+import dacite
+import gymnasium as gym
+import torch
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.utils import common, tree
+from mani_skill.utils.structs.types import Device
+
+
+@dataclass
+class CachedResetsConfig:
+    num_resets: Optional[int] = None
+    """The number of reset states to cache. If none it will cache `num_envs` number of reset states."""
+    device: Optional[Device] = None
+    """The device to cache the reset states on. If none it will use the base environment's device."""
+    seed: Optional[int] = None
+    """The seed to use for generating the cached reset states."""
+
+    def dict(self):
+        return {k: v for k, v in asdict(self).items()}
+
+
+class CachedResetWrapper(gym.Wrapper):
+    """
+    Cached reset wrapper for ManiSkill3 environments. Caching resets allows you to skip slower parts of the reset function call and boost environment FPS as a result.
+
+    Args:
+        env: The environment to wrap.
+        reset_to_env_states: A dictionary with keys "env_states" and optionally "obs". "env_states" is a dictionary of environment states to reset to.
+            "obs" contains the corresponding observations generated at those env states. If reset_to_env_states is not provided, the wrapper will sample reset states
+            from the environment using the given seed.
+        config: A dictionary or a `CachedResetsConfig` object that contains the configuration for the cached resets.
+    """
+
+    def __init__(
+        self,
+        env: gym.Env,
+        reset_to_env_states: Optional[dict] = None,
+        config: Union[CachedResetsConfig, dict] = CachedResetsConfig(),
+    ):
+        super().__init__(env)
+        self.num_envs = self.base_env.num_envs
+        if isinstance(config, CachedResetsConfig):
+            config = config.dict()
+        self.cached_resets_config = dacite.from_dict(
+            data_class=CachedResetsConfig,
+            data=config,
+            config=dacite.Config(strict=True),
+        )
+        cached_data_device = self.cached_resets_config.device
+        if cached_data_device is None:
+            cached_data_device = self.base_env.device
+        self._num_cached_resets = 0
+        if reset_to_env_states is not None:
+            self._cached_resets_env_states = reset_to_env_states["env_states"]
+            self._cached_resets_obs_buffer = reset_to_env_states.get("obs", None)
+            self._num_cached_resets = len(self._cached_resets_env_states)
+        else:
+            if self.cached_resets_config.num_resets is None:
+                self.cached_resets_config.num_resets = 16384
+            self._cached_resets_env_states = []
+            self._cached_resets_obs_buffer = []
+            while self._num_cached_resets < self.cached_resets_config.num_resets:
+                obs, _ = self.env.reset(
+                    seed=self.cached_resets_config.seed,
+                    options=dict(
+                        env_idx=torch.arange(
+                            0,
+                            min(
+                                self.cached_resets_config.num_resets
+                                - self._num_cached_resets,
+                                self.num_envs,
+                            ),
+                            device=self.base_env.device,
+                        )
+                    ),
+                )
+                state = self.env.get_wrapper_attr("get_state_dict")()
+                if (
+                    self.cached_resets_config.num_resets - self._num_cached_resets
+                    < self.num_envs
+                ):
+                    obs = tree.slice(
+                        obs,
+                        slice(
+                            0,
+                            self.cached_resets_config.num_resets
+                            - self._num_cached_resets,
+                        ),
+                    )
+                    state = tree.slice(
+                        state,
+                        slice(
+                            0,
+                            self.cached_resets_config.num_resets
+                            - self._num_cached_resets,
+                        ),
+                    )
+                self._cached_resets_obs_buffer.append(
+                    common.to_tensor(obs, device=self.cached_resets_config.device)
+                )
+                self._cached_resets_env_states.append(
+                    common.to_tensor(state, device=self.cached_resets_config.device)
+                )
+                self._num_cached_resets += self.num_envs
+            self._cached_resets_env_states = tree.cat(self._cached_resets_env_states)
+            self._cached_resets_obs_buffer = tree.cat(self._cached_resets_obs_buffer)
+
+        self._cached_resets_env_states = common.to_tensor(
+            self._cached_resets_env_states, device=cached_data_device
+        )
+        if self._cached_resets_obs_buffer is not None:
+            self._cached_resets_obs_buffer = common.to_tensor(
+                self._cached_resets_obs_buffer, device=cached_data_device
+            )
+
+    @property
+    def base_env(self) -> BaseEnv:
+        return self.env.unwrapped
+
+    def reset(
+        self,
+        *args,
+        seed: Optional[Union[int, List[int]]] = None,
+        options: Optional[dict] = None,
+        **kwargs
+    ):
+        env_idx = None
+        if options is None:
+            options = dict()
+        if "env_idx" in options:
+            env_idx = options["env_idx"]
+        if self._cached_resets_env_states is not None:
+            sampled_ids = torch.randint(
+                0,
+                self._num_cached_resets,
+                size=(len(env_idx) if env_idx is not None else self.num_envs,),
+                device=self.base_env.device,
+            )
+            options["reset_to_env_states"] = dict(
+                env_states=tree.slice(self._cached_resets_env_states, sampled_ids),
+            )
+            if self._cached_resets_obs_buffer is not None:
+                options["reset_to_env_states"]["obs"] = tree.slice(
+                    self._cached_resets_obs_buffer, sampled_ids
+                )
+        obs, info = self.env.reset(seed=seed, options=options)
+        return obs, info
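A minimal usage sketch for the new wrapper (the task id and cache size are illustrative): it pre-generates a pool of reset states once, and every later reset samples from that pool through the reset_to_env_states path added in sapien_env.py above.

import gymnasium as gym
import mani_skill.envs  # registers ManiSkill environments
from mani_skill.utils.wrappers import CachedResetWrapper

env = gym.make("PickCube-v1", num_envs=16)
# Cache 1024 seeded reset states up front; later resets sample from this cache
# instead of re-running _initialize_episode (and get_obs, since "obs" is cached too).
env = CachedResetWrapper(env, config=dict(num_resets=1024, seed=0))
obs, info = env.reset()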
mani_skill/utils/wrappers/record.py
CHANGED

@@ -357,16 +357,15 @@ class RecordEpisode(gym.Wrapper):
         self,
         *args,
         seed: Optional[Union[int, List[int]]] = None,
-        options: Optional[dict] = dict(),
+        options: Optional[dict] = None,
         **kwargs,
     ):
-
         if self.save_on_reset:
             if self.save_video and self.num_envs == 1:
                 self.flush_video()
             # if doing a full reset then we flush all trajectories including incompleted ones
             if self._trajectory_buffer is not None:
-                if "env_idx" not in options:
+                if options is None or "env_idx" not in options:
                     self.flush_trajectory(env_idxs_to_flush=np.arange(self.num_envs))
                 else:
                     self.flush_trajectory(
@@ -415,7 +414,7 @@ class RecordEpisode(gym.Wrapper):
         if self.record_env_state:
             first_step.state = common.to_numpy(common.batch(state_dict))
         env_idx = np.arange(self.num_envs)
-        if "env_idx" in options:
+        if options is not None and "env_idx" in options:
             env_idx = common.to_numpy(options["env_idx"])
         if self._trajectory_buffer is None:
             # Initialize trajectory buffer on the first episode based on given observation (which should be generated after all wrappers)
mani_skill/vector/wrappers/gymnasium.py
CHANGED

@@ -89,10 +89,10 @@ class ManiSkillVectorEnv(VectorEnv):
         self,
         *,
         seed: Optional[Union[int, List[int]]] = None,
-        options: Optional[dict] = dict(),
+        options: Optional[dict] = None,
     ):
         obs, info = self._env.reset(seed=seed, options=options)
-        if "env_idx" in options:
+        if options is not None and "env_idx" in options:
             env_idx = options["env_idx"]
             mask = torch.zeros(self.num_envs, dtype=bool, device=self.base_env.device)
             mask[env_idx] = True
{mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/RECORD
RENAMED
@@ -71,7 +71,7 @@ mani_skill/agents/robots/xarm/__init__.py,sha256=6Mhn4vV4f9XxcK493U5W9VE6yGGgydP
 mani_skill/agents/robots/xarm/xarm7_ability.py,sha256=yj7CUBQpbGVUiT22qweJKTniJE0DxdEyyKj329vr0HY,6106
 mani_skill/agents/robots/xarm6/__init__.py,sha256=0r19OsKmm1ssKB5Rrie8syWQvpXNooVOv6m-iygrdM0,109
 mani_skill/agents/robots/xarm6/xarm6_nogripper.py,sha256=FPhOpWQw5RPsSHLhZ9JWjYeh25GboO4I5_Hn05Ub84Q,7379
-mani_skill/agents/robots/xarm6/xarm6_robotiq.py,sha256=
+mani_skill/agents/robots/xarm6/xarm6_robotiq.py,sha256=0JPtaa3kchle2XqNjltiuPuntC0YMwn-HfejX3gI2uc,16358
 mani_skill/assets/maniskill2-scene-2.glb,sha256=C5om9o9r6B-fWoauzNfUm2WV5sh8Nf7AvZRlYo1-IXQ,4737204
 mani_skill/assets/deformable_manipulation/beaker.glb,sha256=MMaoH6OruVSzO8CKuK2AMyaxA5kjsbbDQXyTVycWsPI,18104
 mani_skill/assets/deformable_manipulation/bottle.glb,sha256=AHWoATBEBeesfbiYNfSB0O0PWhsH0oa2wUBv79w9AVA,36476
@@ -539,7 +539,7 @@ mani_skill/assets/robots/xarm7/meshes/visual/link7.glb,sha256=aZatACOv20VJbi2tOE
 mani_skill/assets/robots/xarm7/meshes/visual/link_base.glb,sha256=vcy2lN1V72jIsSDRT0ZKVskR_0pVOXtDvBkxO2GENWs,467668
 mani_skill/envs/__init__.py,sha256=YPlttBErTcf9vSnkZ54EQ8vTABSfFFrBdUY0AkF4vmg,43
 mani_skill/envs/minimal_template.py,sha256=9THHWA1vkHatptc9g5Ojh-UBUKWQmLHVeq4fcaqv2aY,2200
-mani_skill/envs/sapien_env.py,sha256
+mani_skill/envs/sapien_env.py,sha256=-reQWgDhCQga2RqFovQlHrsAKxxnvpcYYtK1AqxPEyI,75338
 mani_skill/envs/scene.py,sha256=4ZAIJs61fwPPhfDvc3v845sj_Ftsd1sSYaju10KnXbQ,48465
 mani_skill/envs/sim2real_env.py,sha256=3mkQX4TonE2pUC5_Atmx0IYDH2_v6GSwOPJvQMEvCNY,19214
 mani_skill/envs/template.py,sha256=0wnwKjnGOF7RvTR5Gz4VopaUiFxnIioXwmb4nPVxAs8,11939
@@ -720,6 +720,7 @@ mani_skill/utils/logging_utils.py,sha256=Iomnw2VrJ56A6kvyIm9Zhgp94Qf-WOf9WuMAGNB
 mani_skill/utils/precompile_mpm.py,sha256=D497m_0mcNifeBEV7wmG5gHU_SstNpOoHIb-OQiqvUA,291
 mani_skill/utils/registration.py,sha256=u8ftfGvQP4qzlKNqLQjGi3cRF_-h6Rz-28xbLkW_YJ4,9718
 mani_skill/utils/sapien_utils.py,sha256=QMV0jRZO51KzIMB5CVW_Ne-4fPw0-mqM4a3yhNZaMYo,16430
+mani_skill/utils/tree.py,sha256=jFyqCqVkarR6AaTDissVo33nOfbPfkZp8CsqEQmCV9Q,515
 mani_skill/utils/assets/README.md,sha256=5kkmsIiV64ySEGO34HaAlpjXTyrGs1KTV5WnofK46G0,70
 mani_skill/utils/assets/__init__.py,sha256=gQVKwAczcImTXArSltBWKlSUUuguO12sZYO3Jh5KLso,159
 mani_skill/utils/assets/data.py,sha256=xEuibRoEPBDN_vEU-MM5UWf6VDb1omE6BfZKPvlMPdI,8807
@@ -801,7 +802,7 @@ mani_skill/utils/scene_builder/table/assets/table.glb,sha256=yw69itZDjBFg8JXZAr9
 mani_skill/utils/structs/README.md,sha256=qnYKimp_ZkgNcduURrYQxVTimNmq_usDMKoQ8VtMdCs,286
 mani_skill/utils/structs/__init__.py,sha256=BItR3Xe0z6xCrMHAEaH0AAAVyeonsQ3q-DJUyRUibAA,524
 mani_skill/utils/structs/actor.py,sha256=L0p6vkr8rGtJmF22xAq8Q7nhXKnDD5dahzODSAko0bg,17394
-mani_skill/utils/structs/articulation.py,sha256=
+mani_skill/utils/structs/articulation.py,sha256=QvWQsHdgCUCiGHVfUzZOeEqJ_OQsbi7CrxkCgjy3wyM,38491
 mani_skill/utils/structs/articulation_joint.py,sha256=xDQkCAXM3XZ56YgFqLwH5Ec8aFqhR5BqMSvDYCS0bzw,12972
 mani_skill/utils/structs/base.py,sha256=meGQK5Y4KtHKLnp9VeOZS2gtwg9tE55whuEeqOguBaI,19465
 mani_skill/utils/structs/decorators.py,sha256=Lv6wQ989dOnreo2tB-qopDnkeBp_jsn1pmfUR-OY8VQ,535
@@ -815,20 +816,21 @@ mani_skill/utils/visualization/__init__.py,sha256=0QF97UR8d7poMHo6m52DsAUXAmUb3S
 mani_skill/utils/visualization/jupyter_utils.py,sha256=dXXUQz-rFTOV_Xq5yA6YE6cXg7DPw15YStw37NgB5Qc,1322
 mani_skill/utils/visualization/misc.py,sha256=KrDCef7F5GmGOdiBQ4qFUnmUTe-7-nNBz2DVBGFD8YU,5041
 mani_skill/utils/visualization/renderer.py,sha256=afFWwSQEeL-9c5CsBT1uug-zugGjOr1FDzmvd45-9dk,1646
-mani_skill/utils/wrappers/__init__.py,sha256=
+mani_skill/utils/wrappers/__init__.py,sha256=QfSTw9RNtQEtEAzD1RLx0WXjNjIS7wVM2IIkSkLT6Xw,321
 mani_skill/utils/wrappers/action_repeat.py,sha256=RhCtzt3fYCtD-CClIOhAzdycGwVTXP_FG61yEf-QLqY,3542
+mani_skill/utils/wrappers/cached_reset.py,sha256=KV9Sd-mIK9NM_nes-7HtO3HAkkuflctVnUejSN77ecE,6272
 mani_skill/utils/wrappers/flatten.py,sha256=GuHJ3fCOdj9G_jm--XgG8k0p2G1eJx4LY1tesQQjnkg,4913
 mani_skill/utils/wrappers/frame_stack.py,sha256=pCp83HqXnFxbsKRYgwXreNBHnhD-yF0R2_7jdtGOTWQ,4213
 mani_skill/utils/wrappers/gymnasium.py,sha256=p0kl29kkedD2arIvGskClKhYDBAH97mZO4rTepz62jQ,4174
-mani_skill/utils/wrappers/record.py,sha256=
+mani_skill/utils/wrappers/record.py,sha256=73g-dvnFrXcFy7t2BksgH8UqBUVBZG8BbCNPEbdtBSc,37362
 mani_skill/utils/wrappers/visual_encoders.py,sha256=ISLO5ceaRkINhvce92VuZMDMCU3I4F7cQWFW2aVP-14,2205
 mani_skill/vector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mani_skill/vector/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mani_skill/vector/wrappers/gymnasium.py,sha256=
+mani_skill/vector/wrappers/gymnasium.py,sha256=SbFD9inb_a8Hm36ckpdZumxVRfVcw5l1-nfyWGF0KfA,7456
 mani_skill/vector/wrappers/sb3.py,sha256=SlXdiEPqcNHYMhJCzA29kBU6zK7DKTe1nc0L6Z3QQtY,4722
 mani_skill/viewer/__init__.py,sha256=srvDBsk4LQU75K2VIttrhiQ68p_ro7PSDqQRls2PY5c,1722
-mani_skill_nightly-2025.7.
-mani_skill_nightly-2025.7.
-mani_skill_nightly-2025.7.
-mani_skill_nightly-2025.7.
-mani_skill_nightly-2025.7.
+mani_skill_nightly-2025.7.25.606.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mani_skill_nightly-2025.7.25.606.dist-info/METADATA,sha256=Fz68W0vbVEG-GCrKasNzdf8kz8wMbUYdJnzsWDJCKFY,9271
+mani_skill_nightly-2025.7.25.606.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+mani_skill_nightly-2025.7.25.606.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
+mani_skill_nightly-2025.7.25.606.dist-info/RECORD,,
{mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/LICENSE
RENAMED
File without changes

{mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/WHEEL
RENAMED
File without changes

{mani_skill_nightly-2025.7.16.652.dist-info → mani_skill_nightly-2025.7.25.606.dist-info}/top_level.txt
RENAMED
File without changes