torchrl 0.11.0__cp314-cp314-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarks/benchmark_batched_envs.py +104 -0
- benchmarks/conftest.py +91 -0
- benchmarks/ecosystem/gym_env_throughput.py +321 -0
- benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
- benchmarks/requirements.txt +7 -0
- benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
- benchmarks/test_collectors_benchmark.py +240 -0
- benchmarks/test_compressed_storage_benchmark.py +145 -0
- benchmarks/test_envs_benchmark.py +133 -0
- benchmarks/test_llm.py +101 -0
- benchmarks/test_non_tensor_env_benchmark.py +70 -0
- benchmarks/test_objectives_benchmarks.py +1199 -0
- benchmarks/test_replaybuffer_benchmark.py +254 -0
- sota-check/README.md +35 -0
- sota-implementations/README.md +142 -0
- sota-implementations/a2c/README.md +39 -0
- sota-implementations/a2c/a2c_atari.py +291 -0
- sota-implementations/a2c/a2c_mujoco.py +273 -0
- sota-implementations/a2c/utils_atari.py +240 -0
- sota-implementations/a2c/utils_mujoco.py +160 -0
- sota-implementations/bandits/README.md +7 -0
- sota-implementations/bandits/dqn.py +126 -0
- sota-implementations/cql/cql_offline.py +198 -0
- sota-implementations/cql/cql_online.py +249 -0
- sota-implementations/cql/discrete_cql_offline.py +180 -0
- sota-implementations/cql/discrete_cql_online.py +227 -0
- sota-implementations/cql/utils.py +471 -0
- sota-implementations/crossq/crossq.py +271 -0
- sota-implementations/crossq/utils.py +320 -0
- sota-implementations/ddpg/ddpg.py +231 -0
- sota-implementations/ddpg/utils.py +325 -0
- sota-implementations/decision_transformer/dt.py +163 -0
- sota-implementations/decision_transformer/lamb.py +167 -0
- sota-implementations/decision_transformer/online_dt.py +178 -0
- sota-implementations/decision_transformer/utils.py +562 -0
- sota-implementations/discrete_sac/discrete_sac.py +243 -0
- sota-implementations/discrete_sac/utils.py +324 -0
- sota-implementations/dqn/README.md +30 -0
- sota-implementations/dqn/dqn_atari.py +272 -0
- sota-implementations/dqn/dqn_cartpole.py +236 -0
- sota-implementations/dqn/utils_atari.py +132 -0
- sota-implementations/dqn/utils_cartpole.py +90 -0
- sota-implementations/dreamer/README.md +129 -0
- sota-implementations/dreamer/dreamer.py +586 -0
- sota-implementations/dreamer/dreamer_utils.py +1107 -0
- sota-implementations/expert-iteration/README.md +352 -0
- sota-implementations/expert-iteration/ei_utils.py +770 -0
- sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
- sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
- sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
- sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
- sota-implementations/gail/gail.py +327 -0
- sota-implementations/gail/gail_utils.py +68 -0
- sota-implementations/gail/ppo_utils.py +157 -0
- sota-implementations/grpo/README.md +273 -0
- sota-implementations/grpo/grpo-async.py +437 -0
- sota-implementations/grpo/grpo-sync.py +435 -0
- sota-implementations/grpo/grpo_utils.py +843 -0
- sota-implementations/grpo/requirements_gsm8k.txt +11 -0
- sota-implementations/grpo/requirements_ifeval.txt +16 -0
- sota-implementations/impala/README.md +33 -0
- sota-implementations/impala/impala_multi_node_ray.py +292 -0
- sota-implementations/impala/impala_multi_node_submitit.py +284 -0
- sota-implementations/impala/impala_single_node.py +261 -0
- sota-implementations/impala/utils.py +184 -0
- sota-implementations/iql/discrete_iql.py +230 -0
- sota-implementations/iql/iql_offline.py +164 -0
- sota-implementations/iql/iql_online.py +225 -0
- sota-implementations/iql/utils.py +437 -0
- sota-implementations/multiagent/README.md +74 -0
- sota-implementations/multiagent/iql.py +237 -0
- sota-implementations/multiagent/maddpg_iddpg.py +266 -0
- sota-implementations/multiagent/mappo_ippo.py +267 -0
- sota-implementations/multiagent/qmix_vdn.py +271 -0
- sota-implementations/multiagent/sac.py +337 -0
- sota-implementations/multiagent/utils/__init__.py +4 -0
- sota-implementations/multiagent/utils/logging.py +151 -0
- sota-implementations/multiagent/utils/utils.py +43 -0
- sota-implementations/ppo/README.md +29 -0
- sota-implementations/ppo/ppo_atari.py +305 -0
- sota-implementations/ppo/ppo_mujoco.py +293 -0
- sota-implementations/ppo/utils_atari.py +238 -0
- sota-implementations/ppo/utils_mujoco.py +152 -0
- sota-implementations/ppo_trainer/train.py +21 -0
- sota-implementations/redq/README.md +7 -0
- sota-implementations/redq/redq.py +199 -0
- sota-implementations/redq/utils.py +1060 -0
- sota-implementations/sac/sac-async.py +266 -0
- sota-implementations/sac/sac.py +239 -0
- sota-implementations/sac/utils.py +381 -0
- sota-implementations/sac_trainer/train.py +16 -0
- sota-implementations/td3/td3.py +254 -0
- sota-implementations/td3/utils.py +319 -0
- sota-implementations/td3_bc/td3_bc.py +177 -0
- sota-implementations/td3_bc/utils.py +251 -0
- torchrl/__init__.py +144 -0
- torchrl/_extension.py +74 -0
- torchrl/_torchrl.cp314-win_amd64.pyd +0 -0
- torchrl/_utils.py +1431 -0
- torchrl/collectors/__init__.py +48 -0
- torchrl/collectors/_base.py +1058 -0
- torchrl/collectors/_constants.py +88 -0
- torchrl/collectors/_multi_async.py +324 -0
- torchrl/collectors/_multi_base.py +1805 -0
- torchrl/collectors/_multi_sync.py +464 -0
- torchrl/collectors/_runner.py +581 -0
- torchrl/collectors/_single.py +2009 -0
- torchrl/collectors/_single_async.py +259 -0
- torchrl/collectors/collectors.py +62 -0
- torchrl/collectors/distributed/__init__.py +32 -0
- torchrl/collectors/distributed/default_configs.py +133 -0
- torchrl/collectors/distributed/generic.py +1306 -0
- torchrl/collectors/distributed/ray.py +1092 -0
- torchrl/collectors/distributed/rpc.py +1006 -0
- torchrl/collectors/distributed/sync.py +731 -0
- torchrl/collectors/distributed/utils.py +160 -0
- torchrl/collectors/llm/__init__.py +10 -0
- torchrl/collectors/llm/base.py +494 -0
- torchrl/collectors/llm/ray_collector.py +275 -0
- torchrl/collectors/llm/utils.py +36 -0
- torchrl/collectors/llm/weight_update/__init__.py +10 -0
- torchrl/collectors/llm/weight_update/vllm.py +348 -0
- torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
- torchrl/collectors/utils.py +433 -0
- torchrl/collectors/weight_update.py +591 -0
- torchrl/csrc/numpy_utils.h +38 -0
- torchrl/csrc/pybind.cpp +27 -0
- torchrl/csrc/segment_tree.h +458 -0
- torchrl/csrc/torch_utils.h +34 -0
- torchrl/csrc/utils.cpp +48 -0
- torchrl/csrc/utils.h +31 -0
- torchrl/data/__init__.py +187 -0
- torchrl/data/datasets/__init__.py +58 -0
- torchrl/data/datasets/atari_dqn.py +878 -0
- torchrl/data/datasets/common.py +281 -0
- torchrl/data/datasets/d4rl.py +489 -0
- torchrl/data/datasets/d4rl_infos.py +187 -0
- torchrl/data/datasets/gen_dgrl.py +375 -0
- torchrl/data/datasets/minari_data.py +643 -0
- torchrl/data/datasets/openml.py +177 -0
- torchrl/data/datasets/openx.py +798 -0
- torchrl/data/datasets/roboset.py +363 -0
- torchrl/data/datasets/utils.py +11 -0
- torchrl/data/datasets/vd4rl.py +432 -0
- torchrl/data/llm/__init__.py +34 -0
- torchrl/data/llm/dataset.py +491 -0
- torchrl/data/llm/history.py +1378 -0
- torchrl/data/llm/prompt.py +198 -0
- torchrl/data/llm/reward.py +225 -0
- torchrl/data/llm/topk.py +186 -0
- torchrl/data/llm/utils.py +543 -0
- torchrl/data/map/__init__.py +21 -0
- torchrl/data/map/hash.py +185 -0
- torchrl/data/map/query.py +204 -0
- torchrl/data/map/tdstorage.py +363 -0
- torchrl/data/map/tree.py +1434 -0
- torchrl/data/map/utils.py +103 -0
- torchrl/data/postprocs/__init__.py +8 -0
- torchrl/data/postprocs/postprocs.py +391 -0
- torchrl/data/replay_buffers/__init__.py +99 -0
- torchrl/data/replay_buffers/checkpointers.py +622 -0
- torchrl/data/replay_buffers/ray_buffer.py +292 -0
- torchrl/data/replay_buffers/replay_buffers.py +2376 -0
- torchrl/data/replay_buffers/samplers.py +2578 -0
- torchrl/data/replay_buffers/scheduler.py +265 -0
- torchrl/data/replay_buffers/storages.py +2412 -0
- torchrl/data/replay_buffers/utils.py +1042 -0
- torchrl/data/replay_buffers/writers.py +781 -0
- torchrl/data/tensor_specs.py +7101 -0
- torchrl/data/utils.py +334 -0
- torchrl/envs/__init__.py +265 -0
- torchrl/envs/async_envs.py +1105 -0
- torchrl/envs/batched_envs.py +3093 -0
- torchrl/envs/common.py +4241 -0
- torchrl/envs/custom/__init__.py +11 -0
- torchrl/envs/custom/chess.py +617 -0
- torchrl/envs/custom/llm.py +214 -0
- torchrl/envs/custom/pendulum.py +401 -0
- torchrl/envs/custom/san_moves.txt +29274 -0
- torchrl/envs/custom/tictactoeenv.py +288 -0
- torchrl/envs/env_creator.py +263 -0
- torchrl/envs/gym_like.py +752 -0
- torchrl/envs/libs/__init__.py +68 -0
- torchrl/envs/libs/_gym_utils.py +326 -0
- torchrl/envs/libs/brax.py +846 -0
- torchrl/envs/libs/dm_control.py +544 -0
- torchrl/envs/libs/envpool.py +447 -0
- torchrl/envs/libs/gym.py +2239 -0
- torchrl/envs/libs/habitat.py +138 -0
- torchrl/envs/libs/isaac_lab.py +87 -0
- torchrl/envs/libs/isaacgym.py +203 -0
- torchrl/envs/libs/jax_utils.py +166 -0
- torchrl/envs/libs/jumanji.py +963 -0
- torchrl/envs/libs/meltingpot.py +599 -0
- torchrl/envs/libs/openml.py +153 -0
- torchrl/envs/libs/openspiel.py +652 -0
- torchrl/envs/libs/pettingzoo.py +1042 -0
- torchrl/envs/libs/procgen.py +351 -0
- torchrl/envs/libs/robohive.py +429 -0
- torchrl/envs/libs/smacv2.py +645 -0
- torchrl/envs/libs/unity_mlagents.py +891 -0
- torchrl/envs/libs/utils.py +147 -0
- torchrl/envs/libs/vmas.py +813 -0
- torchrl/envs/llm/__init__.py +63 -0
- torchrl/envs/llm/chat.py +730 -0
- torchrl/envs/llm/datasets/README.md +4 -0
- torchrl/envs/llm/datasets/__init__.py +17 -0
- torchrl/envs/llm/datasets/gsm8k.py +353 -0
- torchrl/envs/llm/datasets/ifeval.py +274 -0
- torchrl/envs/llm/envs.py +789 -0
- torchrl/envs/llm/libs/README.md +3 -0
- torchrl/envs/llm/libs/__init__.py +8 -0
- torchrl/envs/llm/libs/mlgym.py +869 -0
- torchrl/envs/llm/reward/__init__.py +10 -0
- torchrl/envs/llm/reward/gsm8k.py +324 -0
- torchrl/envs/llm/reward/ifeval/README.md +13 -0
- torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
- torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
- torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
- torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
- torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
- torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
- torchrl/envs/llm/transforms/__init__.py +55 -0
- torchrl/envs/llm/transforms/browser.py +292 -0
- torchrl/envs/llm/transforms/dataloading.py +859 -0
- torchrl/envs/llm/transforms/format.py +73 -0
- torchrl/envs/llm/transforms/kl.py +1544 -0
- torchrl/envs/llm/transforms/policy_version.py +189 -0
- torchrl/envs/llm/transforms/reason.py +323 -0
- torchrl/envs/llm/transforms/tokenizer.py +321 -0
- torchrl/envs/llm/transforms/tools.py +1955 -0
- torchrl/envs/model_based/__init__.py +9 -0
- torchrl/envs/model_based/common.py +180 -0
- torchrl/envs/model_based/dreamer.py +112 -0
- torchrl/envs/transforms/__init__.py +147 -0
- torchrl/envs/transforms/functional.py +48 -0
- torchrl/envs/transforms/gym_transforms.py +203 -0
- torchrl/envs/transforms/module.py +341 -0
- torchrl/envs/transforms/r3m.py +372 -0
- torchrl/envs/transforms/ray_service.py +663 -0
- torchrl/envs/transforms/rb_transforms.py +214 -0
- torchrl/envs/transforms/transforms.py +11835 -0
- torchrl/envs/transforms/utils.py +94 -0
- torchrl/envs/transforms/vc1.py +307 -0
- torchrl/envs/transforms/vecnorm.py +845 -0
- torchrl/envs/transforms/vip.py +407 -0
- torchrl/envs/utils.py +1718 -0
- torchrl/envs/vec_envs.py +11 -0
- torchrl/modules/__init__.py +206 -0
- torchrl/modules/distributions/__init__.py +73 -0
- torchrl/modules/distributions/continuous.py +830 -0
- torchrl/modules/distributions/discrete.py +908 -0
- torchrl/modules/distributions/truncated_normal.py +187 -0
- torchrl/modules/distributions/utils.py +233 -0
- torchrl/modules/llm/__init__.py +62 -0
- torchrl/modules/llm/backends/__init__.py +65 -0
- torchrl/modules/llm/backends/vllm/__init__.py +94 -0
- torchrl/modules/llm/backends/vllm/_models.py +46 -0
- torchrl/modules/llm/backends/vllm/base.py +72 -0
- torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
- torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
- torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
- torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
- torchrl/modules/llm/policies/__init__.py +28 -0
- torchrl/modules/llm/policies/common.py +1809 -0
- torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
- torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
- torchrl/modules/llm/utils.py +23 -0
- torchrl/modules/mcts/__init__.py +21 -0
- torchrl/modules/mcts/scores.py +579 -0
- torchrl/modules/models/__init__.py +86 -0
- torchrl/modules/models/batchrenorm.py +119 -0
- torchrl/modules/models/decision_transformer.py +179 -0
- torchrl/modules/models/exploration.py +731 -0
- torchrl/modules/models/llm.py +156 -0
- torchrl/modules/models/model_based.py +596 -0
- torchrl/modules/models/models.py +1712 -0
- torchrl/modules/models/multiagent.py +1067 -0
- torchrl/modules/models/recipes/impala.py +185 -0
- torchrl/modules/models/utils.py +162 -0
- torchrl/modules/planners/__init__.py +10 -0
- torchrl/modules/planners/cem.py +228 -0
- torchrl/modules/planners/common.py +73 -0
- torchrl/modules/planners/mppi.py +265 -0
- torchrl/modules/tensordict_module/__init__.py +89 -0
- torchrl/modules/tensordict_module/actors.py +2457 -0
- torchrl/modules/tensordict_module/common.py +529 -0
- torchrl/modules/tensordict_module/exploration.py +814 -0
- torchrl/modules/tensordict_module/probabilistic.py +321 -0
- torchrl/modules/tensordict_module/rnn.py +1639 -0
- torchrl/modules/tensordict_module/sequence.py +132 -0
- torchrl/modules/tensordict_module/world_models.py +34 -0
- torchrl/modules/utils/__init__.py +38 -0
- torchrl/modules/utils/mappings.py +9 -0
- torchrl/modules/utils/utils.py +89 -0
- torchrl/objectives/__init__.py +78 -0
- torchrl/objectives/a2c.py +659 -0
- torchrl/objectives/common.py +753 -0
- torchrl/objectives/cql.py +1346 -0
- torchrl/objectives/crossq.py +710 -0
- torchrl/objectives/ddpg.py +453 -0
- torchrl/objectives/decision_transformer.py +371 -0
- torchrl/objectives/deprecated.py +516 -0
- torchrl/objectives/dqn.py +683 -0
- torchrl/objectives/dreamer.py +488 -0
- torchrl/objectives/functional.py +48 -0
- torchrl/objectives/gail.py +258 -0
- torchrl/objectives/iql.py +996 -0
- torchrl/objectives/llm/__init__.py +30 -0
- torchrl/objectives/llm/grpo.py +846 -0
- torchrl/objectives/llm/sft.py +482 -0
- torchrl/objectives/multiagent/__init__.py +8 -0
- torchrl/objectives/multiagent/qmixer.py +396 -0
- torchrl/objectives/ppo.py +1669 -0
- torchrl/objectives/redq.py +683 -0
- torchrl/objectives/reinforce.py +530 -0
- torchrl/objectives/sac.py +1580 -0
- torchrl/objectives/td3.py +570 -0
- torchrl/objectives/td3_bc.py +625 -0
- torchrl/objectives/utils.py +782 -0
- torchrl/objectives/value/__init__.py +28 -0
- torchrl/objectives/value/advantages.py +1956 -0
- torchrl/objectives/value/functional.py +1459 -0
- torchrl/objectives/value/utils.py +360 -0
- torchrl/record/__init__.py +17 -0
- torchrl/record/loggers/__init__.py +23 -0
- torchrl/record/loggers/common.py +48 -0
- torchrl/record/loggers/csv.py +226 -0
- torchrl/record/loggers/mlflow.py +142 -0
- torchrl/record/loggers/tensorboard.py +139 -0
- torchrl/record/loggers/trackio.py +163 -0
- torchrl/record/loggers/utils.py +78 -0
- torchrl/record/loggers/wandb.py +214 -0
- torchrl/record/recorder.py +554 -0
- torchrl/services/__init__.py +79 -0
- torchrl/services/base.py +109 -0
- torchrl/services/ray_service.py +453 -0
- torchrl/testing/__init__.py +107 -0
- torchrl/testing/assertions.py +179 -0
- torchrl/testing/dist_utils.py +122 -0
- torchrl/testing/env_creators.py +227 -0
- torchrl/testing/env_helper.py +35 -0
- torchrl/testing/gym_helpers.py +156 -0
- torchrl/testing/llm_mocks.py +119 -0
- torchrl/testing/mocking_classes.py +2720 -0
- torchrl/testing/modules.py +295 -0
- torchrl/testing/mp_helpers.py +15 -0
- torchrl/testing/ray_helpers.py +293 -0
- torchrl/testing/utils.py +190 -0
- torchrl/trainers/__init__.py +42 -0
- torchrl/trainers/algorithms/__init__.py +11 -0
- torchrl/trainers/algorithms/configs/__init__.py +705 -0
- torchrl/trainers/algorithms/configs/collectors.py +216 -0
- torchrl/trainers/algorithms/configs/common.py +41 -0
- torchrl/trainers/algorithms/configs/data.py +308 -0
- torchrl/trainers/algorithms/configs/envs.py +104 -0
- torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
- torchrl/trainers/algorithms/configs/logging.py +80 -0
- torchrl/trainers/algorithms/configs/modules.py +570 -0
- torchrl/trainers/algorithms/configs/objectives.py +177 -0
- torchrl/trainers/algorithms/configs/trainers.py +340 -0
- torchrl/trainers/algorithms/configs/transforms.py +955 -0
- torchrl/trainers/algorithms/configs/utils.py +252 -0
- torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
- torchrl/trainers/algorithms/configs/weight_update.py +159 -0
- torchrl/trainers/algorithms/ppo.py +373 -0
- torchrl/trainers/algorithms/sac.py +308 -0
- torchrl/trainers/helpers/__init__.py +40 -0
- torchrl/trainers/helpers/collectors.py +416 -0
- torchrl/trainers/helpers/envs.py +573 -0
- torchrl/trainers/helpers/logger.py +33 -0
- torchrl/trainers/helpers/losses.py +132 -0
- torchrl/trainers/helpers/models.py +658 -0
- torchrl/trainers/helpers/replay_buffer.py +59 -0
- torchrl/trainers/helpers/trainers.py +301 -0
- torchrl/trainers/trainers.py +2052 -0
- torchrl/weight_update/__init__.py +33 -0
- torchrl/weight_update/_distributed.py +749 -0
- torchrl/weight_update/_mp.py +624 -0
- torchrl/weight_update/_noupdate.py +102 -0
- torchrl/weight_update/_ray.py +1032 -0
- torchrl/weight_update/_rpc.py +284 -0
- torchrl/weight_update/_shared.py +891 -0
- torchrl/weight_update/llm/__init__.py +32 -0
- torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
- torchrl/weight_update/llm/vllm_nccl.py +710 -0
- torchrl/weight_update/utils.py +73 -0
- torchrl/weight_update/weight_sync_schemes.py +1244 -0
- torchrl-0.11.0.dist-info/LICENSE +21 -0
- torchrl-0.11.0.dist-info/METADATA +1307 -0
- torchrl-0.11.0.dist-info/RECORD +394 -0
- torchrl-0.11.0.dist-info/WHEEL +5 -0
- torchrl-0.11.0.dist-info/entry_points.txt +2 -0
- torchrl-0.11.0.dist-info/top_level.txt +7 -0
@@ -0,0 +1,813 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+from __future__ import annotations
+
+import importlib.util
+
+import torch
+from tensordict import LazyStackedTensorDict, TensorDict, TensorDictBase
+
+from torchrl.data.tensor_specs import (
+    Bounded,
+    Categorical,
+    Composite,
+    DEVICE_TYPING,
+    MultiCategorical,
+    MultiOneHot,
+    OneHot,
+    StackedComposite,
+    TensorSpec,
+    Unbounded,
+)
+from torchrl.data.utils import numpy_to_torch_dtype_dict
+from torchrl.envs.common import _EnvWrapper, EnvBase
+from torchrl.envs.libs.gym import gym_backend, set_gym_backend
+from torchrl.envs.utils import (
+    _classproperty,
+    _selective_unsqueeze,
+    check_marl_grouping,
+    MarlGroupMapType,
+)
+
+_has_vmas = importlib.util.find_spec("vmas") is not None
+
+
+__all__ = ["VmasWrapper", "VmasEnv"]
+
+
+def _get_envs():
+    if not _has_vmas:
+        raise ImportError("VMAS is not installed in your virtual environment.")
+    import vmas
+
+    all_scenarios = vmas.scenarios + vmas.mpe_scenarios + vmas.debug_scenarios
+
+    return all_scenarios
+
+
+@set_gym_backend("gym")
+def _vmas_to_torchrl_spec_transform(
+    spec,
+    device,
+    categorical_action_encoding,
+) -> TensorSpec:
+    gym_spaces = gym_backend("spaces")
+    if isinstance(spec, gym_spaces.discrete.Discrete):
+        action_space_cls = Categorical if categorical_action_encoding else OneHot
+        dtype = (
+            numpy_to_torch_dtype_dict[spec.dtype]
+            if categorical_action_encoding
+            else torch.long
+        )
+        return action_space_cls(spec.n, device=device, dtype=dtype)
+    elif isinstance(spec, gym_spaces.multi_discrete.MultiDiscrete):
+        dtype = (
+            numpy_to_torch_dtype_dict[spec.dtype]
+            if categorical_action_encoding
+            else torch.long
+        )
+        return (
+            MultiCategorical(spec.nvec, device=device, dtype=dtype)
+            if categorical_action_encoding
+            else MultiOneHot(spec.nvec, device=device, dtype=dtype)
+        )
+    elif isinstance(spec, gym_spaces.Box):
+        shape = spec.shape
+        if not len(shape):
+            shape = torch.Size([1])
+        dtype = numpy_to_torch_dtype_dict[spec.dtype]
+        low = torch.tensor(spec.low, device=device, dtype=dtype)
+        high = torch.tensor(spec.high, device=device, dtype=dtype)
+        is_unbounded = low.isinf().all() and high.isinf().all()
+        return (
+            Unbounded(shape, device=device, dtype=dtype)
+            if is_unbounded
+            else Bounded(
+                low,
+                high,
+                shape,
+                dtype=dtype,
+                device=device,
+            )
+        )
+    elif isinstance(spec, gym_spaces.Dict):
+        spec_out = {}
+        for key in spec.keys():
+            spec_out[key] = _vmas_to_torchrl_spec_transform(
+                spec[key],
+                device=device,
+                categorical_action_encoding=categorical_action_encoding,
+            )
+        # the batch-size must be set later
+        return Composite(spec_out, device=device)
+    else:
+        raise NotImplementedError(
+            f"spec of type {type(spec).__name__} is currently unaccounted for vmas"
+        )
+
+
+class VmasWrapper(_EnvWrapper):
+    """Vmas environment wrapper.
+
+    GitHub: https://github.com/proroklab/VectorizedMultiAgentSimulator
+
+    Paper: https://arxiv.org/abs/2207.03530
+
+    Args:
+        env (``vmas.simulator.environment.environment.Environment``): the vmas environment to wrap.
+
+    Keyword Args:
+        num_envs (int): Number of vectorized simulation environments. VMAS performs vectorized simulations using PyTorch.
+            This argument indicates the number of vectorized environments that should be simulated in a batch. It will also
+            determine the batch size of the environment.
+        device (torch.device, optional): Device for simulation. Defaults to the default device. All the tensors created by VMAS
+            will be placed on this device.
+        continuous_actions (bool, optional): Whether to use continuous actions. Defaults to ``True``. If ``False``, actions
+            will be discrete. The number of actions and their size will depend on the chosen scenario.
+            See the VMAS repository for more info.
+        max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon). Each VMAS scenario can
+            be terminating or not. If ``max_steps`` is specified,
+            the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached.
+            Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`,
+            this argument will not set the ``"truncated"`` entry in the tensordict.
+        categorical_actions (bool, optional): if the environment actions are discrete, whether to transform
+            them to categorical or one-hot. Defaults to ``True``.
+        group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for
+            input/output. By default, if the agent names follow the ``"<name>_<int>"``
+            convention, they will be grouped by ``"<name>"``. If they do not follow this convention, they will all be put
+            in one group named ``"agents"``.
+            Otherwise, a group map can be specified or selected from some premade options.
+            See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
+
+    Attributes:
+        group_map (Dict[str, List[str]]): how to group agents in tensordicts for
+            input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
+        agent_names (list of str): names of the agents in the environment
+        agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment
+        full_action_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_observation_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_reward_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_done_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        het_specs (bool): whether the environment has any lazy spec
+        het_specs_map (Dict[str, bool]): dictionary mapping each group to a flag representing whether the group has lazy specs
+        available_envs (List[str]): the list of the scenarios available to build.
+
+    .. warning::
+        VMAS returns a single ``done`` flag which does not distinguish between
+        when the env reached ``max_steps`` and termination.
+        If you deem the ``truncation`` signal necessary, set ``max_steps`` to
+        ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform.
+
+    Examples:
+        >>> env = VmasWrapper(
+        ...     vmas.make_env(
+        ...         scenario="flocking",
+        ...         num_envs=32,
+        ...         continuous_actions=True,
+        ...         max_steps=200,
+        ...         device="cpu",
+        ...         seed=None,
+        ...         # Scenario kwargs
+        ...         n_agents=5,
+        ...     )
+        ... )
+        >>> print(env.rollout(10))
+        TensorDict(
+            fields={
+                agents: TensorDict(
+                    fields={
+                        action: Tensor(shape=torch.Size([32, 10, 5, 2]), device=cpu, dtype=torch.float32, is_shared=False),
+                        info: TensorDict(
+                            fields={
+                                agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
+                                agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                            batch_size=torch.Size([32, 10, 5]),
+                            device=cpu,
+                            is_shared=False),
+                        observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False)},
+                    batch_size=torch.Size([32, 10, 5]),
+                    device=cpu,
+                    is_shared=False),
+                done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
+                next: TensorDict(
+                    fields={
+                        agents: TensorDict(
+                            fields={
+                                info: TensorDict(
+                                    fields={
+                                        agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
+                                        agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                                    batch_size=torch.Size([32, 10, 5]),
+                                    device=cpu,
+                                    is_shared=False),
+                                observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False),
+                                reward: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                            batch_size=torch.Size([32, 10, 5]),
+                            device=cpu,
+                            is_shared=False),
+                        done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
+                        terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
+                    batch_size=torch.Size([32, 10]),
+                    device=cpu,
+                    is_shared=False),
+                terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
+            batch_size=torch.Size([32, 10]),
+            device=cpu,
+            is_shared=False)
+    """
+
+    git_url = "https://github.com/proroklab/VectorizedMultiAgentSimulator"
+    libname = "vmas"
+
+    @property
+    def lib(self):
+        import vmas
+
+        return vmas
+
+    @_classproperty
+    def available_envs(cls):
+        if not _has_vmas:
+            return []
+        return list(_get_envs())
+
+    def __init__(
+        self,
+        env: vmas.simulator.environment.environment.Environment = None,  # noqa
+        categorical_actions: bool = True,
+        group_map: MarlGroupMapType | dict[str, list[str]] | None = None,
+        **kwargs,
+    ):
+        if env is not None:
+            kwargs["env"] = env
+            if "device" in kwargs.keys() and kwargs["device"] != str(env.device):
+                raise TypeError("Env device is different from vmas device")
+            kwargs["device"] = str(env.device)
+        self.group_map = group_map
+        self.categorical_actions = categorical_actions
+        super().__init__(**kwargs, allow_done_after_reset=True)
+
+    def _build_env(
+        self,
+        env: vmas.simulator.environment.environment.Environment,  # noqa
+        from_pixels: bool = False,
+        pixels_only: bool = False,
+    ):
+        self.from_pixels = from_pixels
+        self.pixels_only = pixels_only
+
+        # TODO pixels
+        if self.from_pixels:
+            raise NotImplementedError("vmas rendering not yet implemented")
+
+        # Adjust batch size
+        if len(self.batch_size) == 0:
+            # Batch size not set
+            self.batch_size = torch.Size((env.num_envs,))
+        elif len(self.batch_size) == 1:
+            # Batch size is set
+            if not self.batch_size[0] == env.num_envs:
+                raise TypeError(
+                    "Batch size used in constructor does not match vmas batch size."
+                )
+        else:
+            raise TypeError(
+                "Batch size used in constructor is not compatible with vmas."
+            )
+
+        return env
+
+    def _get_default_group_map(self, agent_names: list[str]):
+        # This function performs the default grouping in vmas.
+        # Agents with names "<name>_<int>" will be grouped in group name "<name>".
+        # If any of the agents does not follow the naming convention, we fall
+        # back on having all agents in one group named "agents".
+        group_map = {}
+        follows_convention = True
+        for agent_name in agent_names:
+            # See if the agent follows the convention "<name>_<int>"
+            agent_name_split = agent_name.split("_")
+            if len(agent_name_split) == 1:
+                follows_convention = False
+            follows_convention = follows_convention and agent_name_split[-1].isdigit()
+
+            if not follows_convention:
+                break
+
+            # Group it with other agents that follow the same convention
+            group_name = "_".join(agent_name_split[:-1])
+            if group_name in group_map:
+                group_map[group_name].append(agent_name)
+            else:
+                group_map[group_name] = [agent_name]
+
+        if not follows_convention:
+            group_map = MarlGroupMapType.ALL_IN_ONE_GROUP.get_group_map(agent_names)
+
+        # For BC-compatibility rename the "agent" group to "agents"
+        if "agent" in group_map and len(group_map) == 1:
+            agent_group = group_map["agent"]
+            group_map["agents"] = agent_group
+            del group_map["agent"]
+        return group_map
+
+    def _make_specs(
+        self,
+        env: vmas.simulator.environment.environment.Environment,  # noqa
+    ) -> None:
+        # Create and check group map
+        self.agent_names = [agent.name for agent in self.agents]
+        self.agent_names_to_indices_map = {
+            agent.name: i for i, agent in enumerate(self.agents)
+        }
+        if self.group_map is None:
+            self.group_map = self._get_default_group_map(self.agent_names)
+        elif isinstance(self.group_map, MarlGroupMapType):
+            self.group_map = self.group_map.get_group_map(self.agent_names)
+        check_marl_grouping(self.group_map, self.agent_names)
+
+        full_action_spec_unbatched = Composite(device=self.device)
+        full_observation_spec_unbatched = Composite(device=self.device)
+        full_reward_spec_unbatched = Composite(device=self.device)
+
+        self.het_specs = False
+        self.het_specs_map = {}
+        for group in self.group_map.keys():
+            (
+                group_observation_spec,
+                group_action_spec,
+                group_reward_spec,
+                group_info_spec,
+            ) = self._make_unbatched_group_specs(group)
+            full_action_spec_unbatched[group] = group_action_spec
+            full_observation_spec_unbatched[group] = group_observation_spec
+            full_reward_spec_unbatched[group] = group_reward_spec
+            if group_info_spec is not None:
+                full_observation_spec_unbatched[(group, "info")] = group_info_spec
+            group_het_specs = isinstance(
+                group_observation_spec, StackedComposite
+            ) or isinstance(group_action_spec, StackedComposite)
+            self.het_specs_map[group] = group_het_specs
+            self.het_specs = self.het_specs or group_het_specs
+
+        full_done_spec_unbatched = Composite(
+            {
+                "done": Categorical(
+                    n=2,
+                    shape=torch.Size((1,)),
+                    dtype=torch.bool,
+                    device=self.device,
+                ),
+            },
+        )
+
+        self.full_action_spec_unbatched = full_action_spec_unbatched
+        self.full_observation_spec_unbatched = full_observation_spec_unbatched
+        self.full_reward_spec_unbatched = full_reward_spec_unbatched
+        self.full_done_spec_unbatched = full_done_spec_unbatched
+
+    def _make_unbatched_group_specs(self, group: str):
+        # Agent specs
+        action_specs = []
+        observation_specs = []
+        reward_specs = []
+        info_specs = []
+        for agent_name in self.group_map[group]:
+            agent_index = self.agent_names_to_indices_map[agent_name]
+            agent = self.agents[agent_index]
+            action_specs.append(
+                Composite(
+                    {
+                        "action": _vmas_to_torchrl_spec_transform(
+                            self.action_space[agent_index],
+                            categorical_action_encoding=self.categorical_actions,
+                            device=self.device,
+                        )  # shape = (n_actions_per_agent,)
+                    },
+                )
+            )
+            observation_specs.append(
+                Composite(
+                    {
+                        "observation": _vmas_to_torchrl_spec_transform(
+                            self.observation_space[agent_index],
+                            device=self.device,
+                            categorical_action_encoding=self.categorical_actions,
+                        )  # shape = (n_obs_per_agent,)
+                    },
+                )
+            )
+            reward_specs.append(
+                Composite(
+                    {
+                        "reward": Unbounded(
+                            shape=torch.Size((1,)),
+                            device=self.device,
+                        )  # shape = (1,)
+                    }
+                )
+            )
+            agent_info = self.scenario.info(agent)
+            if len(agent_info):
+                info_specs.append(
+                    Composite(
+                        {
+                            key: Unbounded(
+                                shape=_selective_unsqueeze(
+                                    value, batch_size=self.batch_size
+                                ).shape[1:],
+                                device=self.device,
+                                dtype=torch.float32,
+                            )
+                            for key, value in agent_info.items()
+                        },
+                    ).to(self.device)
+                )
+
+        # Create multi-agent specs
+        group_action_spec = torch.stack(
+            action_specs, dim=0
+        )  # shape = (n_agents, n_actions_per_agent)
+        group_observation_spec = torch.stack(
+            observation_specs, dim=0
+        )  # shape = (n_agents, n_obs_per_agent)
+        group_reward_spec = torch.stack(reward_specs, dim=0)  # shape = (n_agents, 1)
+        group_info_spec = None
+        if len(info_specs):
+            group_info_spec = torch.stack(info_specs, dim=0)
+
+        return (
+            group_observation_spec,
+            group_action_spec,
+            group_reward_spec,
+            group_info_spec,
+        )
+
+    def _check_kwargs(self, kwargs: dict):
+        vmas = self.lib
+
+        if "env" not in kwargs:
+            raise TypeError("Could not find environment key 'env' in kwargs.")
+        env = kwargs["env"]
+        if not isinstance(env, vmas.simulator.environment.Environment):
+            raise TypeError(
+                "env is not of type 'vmas.simulator.environment.Environment'."
+            )
+
+    def _init_env(self) -> int | None:
+        pass
+
+    def _set_seed(self, seed: int | None) -> None:
+        self._env.seed(seed)
+
+    def _reset(
+        self, tensordict: TensorDictBase | None = None, **kwargs
+    ) -> TensorDictBase:
+        if tensordict is not None and "_reset" in tensordict.keys():
+            _reset = tensordict.get("_reset")
+            envs_to_reset = _reset.squeeze(-1)
+            if envs_to_reset.all():
+                self._env.reset(return_observations=False)
+            else:
+                for env_index, to_reset in enumerate(envs_to_reset):
+                    if to_reset:
+                        self._env.reset_at(env_index, return_observations=False)
+        else:
+            self._env.reset(return_observations=False)
+
+        obs, dones, infos = self._env.get_from_scenario(
+            get_observations=True,
+            get_infos=True,
+            get_rewards=False,
+            get_dones=True,
+        )
+        dones = self.read_done(dones)
+
+        source = {"done": dones, "terminated": dones.clone()}
+        for group, agent_names in self.group_map.items():
+            agent_tds = []
+            for agent_name in agent_names:
+                i = self.agent_names_to_indices_map[agent_name]
+
+                agent_obs = self.read_obs(obs[i])
+                agent_info = self.read_info(infos[i])
+                agent_td = TensorDict(
+                    source={
+                        "observation": agent_obs,
+                    },
+                    batch_size=self.batch_size,
+                    device=self.device,
+                )
+                if agent_info is not None:
+                    agent_td.set("info", agent_info)
+                agent_tds.append(agent_td)
+
+            agent_tds = LazyStackedTensorDict.maybe_dense_stack(agent_tds, dim=1)
+            if not self.het_specs_map[group]:
+                agent_tds = agent_tds.to_tensordict()
+            source.update({group: agent_tds})
+
+        tensordict_out = TensorDict(
+            source=source,
+            batch_size=self.batch_size,
+            device=self.device,
+        )
+        return tensordict_out
+
+    def _step(
+        self,
+        tensordict: TensorDictBase,
+    ) -> TensorDictBase:
+        agent_indices = {}
+        action_list = []
+        n_agents = 0
+        for group, agent_names in self.group_map.items():
+            group_action = tensordict.get((group, "action"))
+            group_action_list = list(self.read_action(group_action, group=group))
+            agent_indices.update(
+                {
+                    self.agent_names_to_indices_map[agent_name]: i + n_agents
+                    for i, agent_name in enumerate(agent_names)
+                }
+            )
+            n_agents += len(agent_names)
+            action_list += group_action_list
+        action = [action_list[agent_indices[i]] for i in range(self.n_agents)]
+
+        obs, rews, dones, infos = self._env.step(action)
+
+        dones = self.read_done(dones)
+
+        source = {"done": dones, "terminated": dones.clone()}
+        for group, agent_names in self.group_map.items():
+            agent_tds = []
+            for agent_name in agent_names:
+                i = self.agent_names_to_indices_map[agent_name]
+
+                agent_obs = self.read_obs(obs[i])
+                agent_rew = self.read_reward(rews[i])
+                agent_info = self.read_info(infos[i])
+
+                agent_td = TensorDict(
+                    source={
+                        "observation": agent_obs,
+                        "reward": agent_rew,
+                    },
+                    batch_size=self.batch_size,
+                    device=self.device,
+                )
+                if agent_info is not None:
+                    agent_td.set("info", agent_info)
+                agent_tds.append(agent_td)
+
+            agent_tds = LazyStackedTensorDict.maybe_dense_stack(agent_tds, dim=1)
+            if not self.het_specs_map[group]:
+                agent_tds = agent_tds.to_tensordict()
+            source.update({group: agent_tds})
+
+        tensordict_out = TensorDict(
+            source=source,
+            batch_size=self.batch_size,
+            device=self.device,
+        )
+        return tensordict_out
+
+    def read_obs(self, observations: dict | torch.Tensor) -> dict | torch.Tensor:
+        if isinstance(observations, torch.Tensor):
+            return _selective_unsqueeze(observations, batch_size=self.batch_size)
+        return TensorDict(
+            source={key: self.read_obs(value) for key, value in observations.items()},
+            batch_size=self.batch_size,
+        )
+
+    def read_info(self, infos: dict[str, torch.Tensor]) -> torch.Tensor:
+        if len(infos) == 0:
+            return None
+        infos = TensorDict(
+            source={
+                key: _selective_unsqueeze(
+                    value.to(torch.float32), batch_size=self.batch_size
+                )
+                for key, value in infos.items()
+            },
+            batch_size=self.batch_size,
+            device=self.device,
+        )
+
+        return infos
+
+    def read_done(self, done):
+        done = _selective_unsqueeze(done, batch_size=self.batch_size)
+        return done
+
+    def read_reward(self, rewards):
+        rewards = _selective_unsqueeze(rewards, batch_size=self.batch_size)
+        return rewards
+
+    def read_action(self, action, group: str = "agents"):
+        if not self.continuous_actions and not self.categorical_actions:
+            action = self.full_action_spec_unbatched[group, "action"].to_categorical(
+                action
+            )
+        agent_actions = action.unbind(dim=1)
+        return agent_actions
+
+    def __repr__(self) -> str:
+        return (
+            f"{self.__class__.__name__}(num_envs={self.num_envs}, n_agents={self.n_agents},"
+            f" batch_size={self.batch_size}, device={self.device})"
+        )
+
+    def to(self, device: DEVICE_TYPING) -> EnvBase:
+        self._env.to(device)
+        return super().to(device)
+
+
+class VmasEnv(VmasWrapper):
+    """Vmas environment wrapper.
+
+    GitHub: https://github.com/proroklab/VectorizedMultiAgentSimulator
+
+    Paper: https://arxiv.org/abs/2207.03530
+
+    Args:
+        scenario (str or vmas.simulator.scenario.BaseScenario): the vmas scenario to build.
+            Must be one of :attr:`~.available_envs`. For a description and rendering of available scenarios see
+            `the README <https://github.com/proroklab/VectorizedMultiAgentSimulator/tree/VMAS-1.3.3?tab=readme-ov-file#main-scenarios>`__.
+
+
+    Keyword Args:
+        num_envs (int): Number of vectorized simulation environments. VMAS performs vectorized simulations using PyTorch.
+            This argument indicates the number of vectorized environments that should be simulated in a batch. It will also
+            determine the batch size of the environment.
+        device (torch.device, optional): Device for simulation. Defaults to the default device. All the tensors created by VMAS
+            will be placed on this device.
+        continuous_actions (bool, optional): Whether to use continuous actions. Defaults to ``True``. If ``False``, actions
+            will be discrete. The number of actions and their size will depend on the chosen scenario.
+            See the VMAS repository for more info.
+        max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon). Each VMAS scenario can
+            be terminating or not. If ``max_steps`` is specified,
+            the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached.
+            Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`,
+            this argument will not set the ``"truncated"`` entry in the tensordict.
+        categorical_actions (bool, optional): if the environment actions are discrete, whether to transform
+            them to categorical or one-hot. Defaults to ``True``.
+        group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for
+            input/output. By default, if the agent names follow the ``"<name>_<int>"``
+            convention, they will be grouped by ``"<name>"``. If they do not follow this convention, they will all be put
+            in one group named ``"agents"``.
+            Otherwise, a group map can be specified or selected from some premade options.
+            See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
+        **kwargs (Dict, optional): These are additional arguments that can be passed to the VMAS scenario constructor
+            (e.g., number of agents, reward sparsity). The available arguments will vary based on the chosen scenario.
+            To see the available arguments for a specific scenario, see the constructor in its file from
+            `the scenario folder <https://github.com/proroklab/VectorizedMultiAgentSimulator/tree/VMAS-1.3.3/vmas/scenarios>`__.
+
+
+    Attributes:
+        group_map (Dict[str, List[str]]): how to group agents in tensordicts for
+            input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
+        agent_names (list of str): names of the agents in the environment
+        agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment
+        full_action_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_observation_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_reward_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        full_done_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
+        het_specs (bool): whether the environment has any lazy spec
+        het_specs_map (Dict[str, bool]): dictionary mapping each group to a flag representing whether the group has lazy specs
+        available_envs (List[str]): the list of the scenarios available to build.
+
+    .. warning::
+        VMAS returns a single ``done`` flag which does not distinguish between
+        when the env reached ``max_steps`` and termination.
+        If you deem the ``truncation`` signal necessary, set ``max_steps`` to
+        ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform.
+
+    Examples:
+        >>> env = VmasEnv(
+        ...     scenario="flocking",
+        ...     num_envs=32,
+        ...     continuous_actions=True,
+        ...     max_steps=200,
+        ...     device="cpu",
+        ...     seed=None,
+        ...     # Scenario kwargs
+        ...     n_agents=5,
+        ... )
+        >>> print(env.rollout(10))
+        TensorDict(
+            fields={
+                agents: TensorDict(
+                    fields={
+                        action: Tensor(shape=torch.Size([32, 10, 5, 2]), device=cpu, dtype=torch.float32, is_shared=False),
+                        info: TensorDict(
+                            fields={
+                                agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
+                                agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                            batch_size=torch.Size([32, 10, 5]),
+                            device=cpu,
+                            is_shared=False),
+                        observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False)},
+                    batch_size=torch.Size([32, 10, 5]),
+                    device=cpu,
+                    is_shared=False),
+                done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
+                next: TensorDict(
+                    fields={
+                        agents: TensorDict(
+                            fields={
+                                info: TensorDict(
+                                    fields={
+                                        agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
+                                        agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                                    batch_size=torch.Size([32, 10, 5]),
+                                    device=cpu,
+                                    is_shared=False),
+                                observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False),
+                                reward: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
+                            batch_size=torch.Size([32, 10, 5]),
+                            device=cpu,
+                            is_shared=False),
+                        done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
+                        terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
+                    batch_size=torch.Size([32, 10]),
+                    device=cpu,
+                    is_shared=False),
+                terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
+            batch_size=torch.Size([32, 10]),
+            device=cpu,
+            is_shared=False)
+    """
+
+    def __init__(
+        self,
+        scenario: str | vmas.simulator.scenario.BaseScenario,  # noqa
+        *,
+        num_envs: int,
+        continuous_actions: bool = True,
+        max_steps: int | None = None,
+        categorical_actions: bool = True,
+        seed: int | None = None,
+        group_map: MarlGroupMapType | dict[str, list[str]] | None = None,
+        **kwargs,
+    ):
+        if not _has_vmas:
+            raise ImportError(
+                f"vmas python package was not found. Please install this dependency. "
+                f"More info: {self.git_url}."
+            )
+        super().__init__(
+            scenario=scenario,
+            num_envs=num_envs,
+            continuous_actions=continuous_actions,
+            max_steps=max_steps,
+            seed=seed,
+            categorical_actions=categorical_actions,
+            group_map=group_map,
+            **kwargs,
+        )
+
+    def _check_kwargs(self, kwargs: dict):
+        if "scenario" not in kwargs:
+            raise TypeError("Could not find environment key 'scenario' in kwargs.")
+        if "num_envs" not in kwargs:
+            raise TypeError("Could not find environment key 'num_envs' in kwargs.")
+
+    def _build_env(
+        self,
+        scenario: str | vmas.simulator.scenario.BaseScenario,  # noqa
+        num_envs: int,
+        continuous_actions: bool,
+        max_steps: int | None,
+        seed: int | None,
+        **scenario_kwargs,
+    ) -> vmas.simulator.environment.environment.Environment:  # noqa
+        vmas = self.lib
+
+        self.scenario_name = scenario
+        from_pixels = scenario_kwargs.pop("from_pixels", False)
+        pixels_only = scenario_kwargs.pop("pixels_only", False)
+
+        return super()._build_env(
+            env=vmas.make_env(
+                scenario=scenario,
+                num_envs=num_envs,
+                device=self.device
+                if self.device is not None
+                else getattr(
+                    torch, "get_default_device", lambda: torch.device("cpu")
+                )(),
+                continuous_actions=continuous_actions,
+                max_steps=max_steps,
+                seed=seed,
+                wrapper=None,
+                **scenario_kwargs,
+            ),
+            pixels_only=pixels_only,
+            from_pixels=from_pixels,
+        )
+
+    def __repr__(self):
+        return f"{super().__repr__()} (scenario={self.scenario_name})"
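
Usage note: the docstring warning above points out that VMAS folds horizon exhaustion into its single done flag, and recommends recovering a distinct truncation signal via :class:`~torchrl.envs.transforms.StepCounter`. A minimal sketch of that pattern, assuming vmas and torchrl are installed (the scenario name and the 200-step budget are illustrative, not prescribed by this release):

    from torchrl.envs import TransformedEnv
    from torchrl.envs.libs.vmas import VmasEnv
    from torchrl.envs.transforms import StepCounter

    # Leave max_steps=None so VMAS itself never flags the horizon as termination;
    # StepCounter then writes a "truncated" entry once 200 steps have elapsed.
    env = TransformedEnv(
        VmasEnv(scenario="flocking", num_envs=32, max_steps=None, n_agents=5),
        StepCounter(max_steps=200),
    )
    rollout = env.rollout(10)  # carries both "terminated" and "truncated" entries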
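
Similarly, the ``group_map`` argument documented in both docstrings controls how per-agent data is nested in the output tensordicts. A short sketch of overriding the default ``"agents"`` grouping with one of the premade options (``MarlGroupMapType.ONE_GROUP_PER_AGENT`` is an existing torchrl option; the scenario kwargs are illustrative):

    from torchrl.envs.libs.vmas import VmasEnv
    from torchrl.envs.utils import MarlGroupMapType

    # Give each agent its own tensordict group instead of a shared "agents" group.
    env = VmasEnv(
        scenario="flocking",
        num_envs=32,
        group_map=MarlGroupMapType.ONE_GROUP_PER_AGENT,
        n_agents=5,
    )
    print(env.group_map)  # e.g. {"agent_0": ["agent_0"], "agent_1": ["agent_1"], ...}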