torchrl-0.11.0-cp314-cp314t-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. benchmarks/benchmark_batched_envs.py +104 -0
  2. benchmarks/conftest.py +91 -0
  3. benchmarks/ecosystem/gym_env_throughput.py +321 -0
  4. benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
  5. benchmarks/requirements.txt +7 -0
  6. benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
  7. benchmarks/test_collectors_benchmark.py +240 -0
  8. benchmarks/test_compressed_storage_benchmark.py +145 -0
  9. benchmarks/test_envs_benchmark.py +133 -0
  10. benchmarks/test_llm.py +101 -0
  11. benchmarks/test_non_tensor_env_benchmark.py +70 -0
  12. benchmarks/test_objectives_benchmarks.py +1199 -0
  13. benchmarks/test_replaybuffer_benchmark.py +254 -0
  14. sota-check/README.md +35 -0
  15. sota-implementations/README.md +142 -0
  16. sota-implementations/a2c/README.md +39 -0
  17. sota-implementations/a2c/a2c_atari.py +291 -0
  18. sota-implementations/a2c/a2c_mujoco.py +273 -0
  19. sota-implementations/a2c/utils_atari.py +240 -0
  20. sota-implementations/a2c/utils_mujoco.py +160 -0
  21. sota-implementations/bandits/README.md +7 -0
  22. sota-implementations/bandits/dqn.py +126 -0
  23. sota-implementations/cql/cql_offline.py +198 -0
  24. sota-implementations/cql/cql_online.py +249 -0
  25. sota-implementations/cql/discrete_cql_offline.py +180 -0
  26. sota-implementations/cql/discrete_cql_online.py +227 -0
  27. sota-implementations/cql/utils.py +471 -0
  28. sota-implementations/crossq/crossq.py +271 -0
  29. sota-implementations/crossq/utils.py +320 -0
  30. sota-implementations/ddpg/ddpg.py +231 -0
  31. sota-implementations/ddpg/utils.py +325 -0
  32. sota-implementations/decision_transformer/dt.py +163 -0
  33. sota-implementations/decision_transformer/lamb.py +167 -0
  34. sota-implementations/decision_transformer/online_dt.py +178 -0
  35. sota-implementations/decision_transformer/utils.py +562 -0
  36. sota-implementations/discrete_sac/discrete_sac.py +243 -0
  37. sota-implementations/discrete_sac/utils.py +324 -0
  38. sota-implementations/dqn/README.md +30 -0
  39. sota-implementations/dqn/dqn_atari.py +272 -0
  40. sota-implementations/dqn/dqn_cartpole.py +236 -0
  41. sota-implementations/dqn/utils_atari.py +132 -0
  42. sota-implementations/dqn/utils_cartpole.py +90 -0
  43. sota-implementations/dreamer/README.md +129 -0
  44. sota-implementations/dreamer/dreamer.py +586 -0
  45. sota-implementations/dreamer/dreamer_utils.py +1107 -0
  46. sota-implementations/expert-iteration/README.md +352 -0
  47. sota-implementations/expert-iteration/ei_utils.py +770 -0
  48. sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
  49. sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
  50. sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
  51. sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
  52. sota-implementations/gail/gail.py +327 -0
  53. sota-implementations/gail/gail_utils.py +68 -0
  54. sota-implementations/gail/ppo_utils.py +157 -0
  55. sota-implementations/grpo/README.md +273 -0
  56. sota-implementations/grpo/grpo-async.py +437 -0
  57. sota-implementations/grpo/grpo-sync.py +435 -0
  58. sota-implementations/grpo/grpo_utils.py +843 -0
  59. sota-implementations/grpo/requirements_gsm8k.txt +11 -0
  60. sota-implementations/grpo/requirements_ifeval.txt +16 -0
  61. sota-implementations/impala/README.md +33 -0
  62. sota-implementations/impala/impala_multi_node_ray.py +292 -0
  63. sota-implementations/impala/impala_multi_node_submitit.py +284 -0
  64. sota-implementations/impala/impala_single_node.py +261 -0
  65. sota-implementations/impala/utils.py +184 -0
  66. sota-implementations/iql/discrete_iql.py +230 -0
  67. sota-implementations/iql/iql_offline.py +164 -0
  68. sota-implementations/iql/iql_online.py +225 -0
  69. sota-implementations/iql/utils.py +437 -0
  70. sota-implementations/multiagent/README.md +74 -0
  71. sota-implementations/multiagent/iql.py +237 -0
  72. sota-implementations/multiagent/maddpg_iddpg.py +266 -0
  73. sota-implementations/multiagent/mappo_ippo.py +267 -0
  74. sota-implementations/multiagent/qmix_vdn.py +271 -0
  75. sota-implementations/multiagent/sac.py +337 -0
  76. sota-implementations/multiagent/utils/__init__.py +4 -0
  77. sota-implementations/multiagent/utils/logging.py +151 -0
  78. sota-implementations/multiagent/utils/utils.py +43 -0
  79. sota-implementations/ppo/README.md +29 -0
  80. sota-implementations/ppo/ppo_atari.py +305 -0
  81. sota-implementations/ppo/ppo_mujoco.py +293 -0
  82. sota-implementations/ppo/utils_atari.py +238 -0
  83. sota-implementations/ppo/utils_mujoco.py +152 -0
  84. sota-implementations/ppo_trainer/train.py +21 -0
  85. sota-implementations/redq/README.md +7 -0
  86. sota-implementations/redq/redq.py +199 -0
  87. sota-implementations/redq/utils.py +1060 -0
  88. sota-implementations/sac/sac-async.py +266 -0
  89. sota-implementations/sac/sac.py +239 -0
  90. sota-implementations/sac/utils.py +381 -0
  91. sota-implementations/sac_trainer/train.py +16 -0
  92. sota-implementations/td3/td3.py +254 -0
  93. sota-implementations/td3/utils.py +319 -0
  94. sota-implementations/td3_bc/td3_bc.py +177 -0
  95. sota-implementations/td3_bc/utils.py +251 -0
  96. torchrl/.dylibs/libc++.1.0.dylib +0 -0
  97. torchrl/__init__.py +144 -0
  98. torchrl/_extension.py +74 -0
  99. torchrl/_torchrl.cpython-314t-darwin.so +0 -0
  100. torchrl/_utils.py +1431 -0
  101. torchrl/collectors/__init__.py +48 -0
  102. torchrl/collectors/_base.py +1058 -0
  103. torchrl/collectors/_constants.py +88 -0
  104. torchrl/collectors/_multi_async.py +324 -0
  105. torchrl/collectors/_multi_base.py +1805 -0
  106. torchrl/collectors/_multi_sync.py +464 -0
  107. torchrl/collectors/_runner.py +581 -0
  108. torchrl/collectors/_single.py +2009 -0
  109. torchrl/collectors/_single_async.py +259 -0
  110. torchrl/collectors/collectors.py +62 -0
  111. torchrl/collectors/distributed/__init__.py +32 -0
  112. torchrl/collectors/distributed/default_configs.py +133 -0
  113. torchrl/collectors/distributed/generic.py +1306 -0
  114. torchrl/collectors/distributed/ray.py +1092 -0
  115. torchrl/collectors/distributed/rpc.py +1006 -0
  116. torchrl/collectors/distributed/sync.py +731 -0
  117. torchrl/collectors/distributed/utils.py +160 -0
  118. torchrl/collectors/llm/__init__.py +10 -0
  119. torchrl/collectors/llm/base.py +494 -0
  120. torchrl/collectors/llm/ray_collector.py +275 -0
  121. torchrl/collectors/llm/utils.py +36 -0
  122. torchrl/collectors/llm/weight_update/__init__.py +10 -0
  123. torchrl/collectors/llm/weight_update/vllm.py +348 -0
  124. torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
  125. torchrl/collectors/utils.py +433 -0
  126. torchrl/collectors/weight_update.py +591 -0
  127. torchrl/csrc/numpy_utils.h +38 -0
  128. torchrl/csrc/pybind.cpp +27 -0
  129. torchrl/csrc/segment_tree.h +458 -0
  130. torchrl/csrc/torch_utils.h +34 -0
  131. torchrl/csrc/utils.cpp +48 -0
  132. torchrl/csrc/utils.h +31 -0
  133. torchrl/data/__init__.py +187 -0
  134. torchrl/data/datasets/__init__.py +58 -0
  135. torchrl/data/datasets/atari_dqn.py +878 -0
  136. torchrl/data/datasets/common.py +281 -0
  137. torchrl/data/datasets/d4rl.py +489 -0
  138. torchrl/data/datasets/d4rl_infos.py +187 -0
  139. torchrl/data/datasets/gen_dgrl.py +375 -0
  140. torchrl/data/datasets/minari_data.py +643 -0
  141. torchrl/data/datasets/openml.py +177 -0
  142. torchrl/data/datasets/openx.py +798 -0
  143. torchrl/data/datasets/roboset.py +363 -0
  144. torchrl/data/datasets/utils.py +11 -0
  145. torchrl/data/datasets/vd4rl.py +432 -0
  146. torchrl/data/llm/__init__.py +34 -0
  147. torchrl/data/llm/dataset.py +491 -0
  148. torchrl/data/llm/history.py +1378 -0
  149. torchrl/data/llm/prompt.py +198 -0
  150. torchrl/data/llm/reward.py +225 -0
  151. torchrl/data/llm/topk.py +186 -0
  152. torchrl/data/llm/utils.py +543 -0
  153. torchrl/data/map/__init__.py +21 -0
  154. torchrl/data/map/hash.py +185 -0
  155. torchrl/data/map/query.py +204 -0
  156. torchrl/data/map/tdstorage.py +363 -0
  157. torchrl/data/map/tree.py +1434 -0
  158. torchrl/data/map/utils.py +103 -0
  159. torchrl/data/postprocs/__init__.py +8 -0
  160. torchrl/data/postprocs/postprocs.py +391 -0
  161. torchrl/data/replay_buffers/__init__.py +99 -0
  162. torchrl/data/replay_buffers/checkpointers.py +622 -0
  163. torchrl/data/replay_buffers/ray_buffer.py +292 -0
  164. torchrl/data/replay_buffers/replay_buffers.py +2376 -0
  165. torchrl/data/replay_buffers/samplers.py +2578 -0
  166. torchrl/data/replay_buffers/scheduler.py +265 -0
  167. torchrl/data/replay_buffers/storages.py +2412 -0
  168. torchrl/data/replay_buffers/utils.py +1042 -0
  169. torchrl/data/replay_buffers/writers.py +781 -0
  170. torchrl/data/tensor_specs.py +7101 -0
  171. torchrl/data/utils.py +334 -0
  172. torchrl/envs/__init__.py +265 -0
  173. torchrl/envs/async_envs.py +1105 -0
  174. torchrl/envs/batched_envs.py +3093 -0
  175. torchrl/envs/common.py +4241 -0
  176. torchrl/envs/custom/__init__.py +11 -0
  177. torchrl/envs/custom/chess.py +617 -0
  178. torchrl/envs/custom/llm.py +214 -0
  179. torchrl/envs/custom/pendulum.py +401 -0
  180. torchrl/envs/custom/san_moves.txt +29274 -0
  181. torchrl/envs/custom/tictactoeenv.py +288 -0
  182. torchrl/envs/env_creator.py +263 -0
  183. torchrl/envs/gym_like.py +752 -0
  184. torchrl/envs/libs/__init__.py +68 -0
  185. torchrl/envs/libs/_gym_utils.py +326 -0
  186. torchrl/envs/libs/brax.py +846 -0
  187. torchrl/envs/libs/dm_control.py +544 -0
  188. torchrl/envs/libs/envpool.py +447 -0
  189. torchrl/envs/libs/gym.py +2239 -0
  190. torchrl/envs/libs/habitat.py +138 -0
  191. torchrl/envs/libs/isaac_lab.py +87 -0
  192. torchrl/envs/libs/isaacgym.py +203 -0
  193. torchrl/envs/libs/jax_utils.py +166 -0
  194. torchrl/envs/libs/jumanji.py +963 -0
  195. torchrl/envs/libs/meltingpot.py +599 -0
  196. torchrl/envs/libs/openml.py +153 -0
  197. torchrl/envs/libs/openspiel.py +652 -0
  198. torchrl/envs/libs/pettingzoo.py +1042 -0
  199. torchrl/envs/libs/procgen.py +351 -0
  200. torchrl/envs/libs/robohive.py +429 -0
  201. torchrl/envs/libs/smacv2.py +645 -0
  202. torchrl/envs/libs/unity_mlagents.py +891 -0
  203. torchrl/envs/libs/utils.py +147 -0
  204. torchrl/envs/libs/vmas.py +813 -0
  205. torchrl/envs/llm/__init__.py +63 -0
  206. torchrl/envs/llm/chat.py +730 -0
  207. torchrl/envs/llm/datasets/README.md +4 -0
  208. torchrl/envs/llm/datasets/__init__.py +17 -0
  209. torchrl/envs/llm/datasets/gsm8k.py +353 -0
  210. torchrl/envs/llm/datasets/ifeval.py +274 -0
  211. torchrl/envs/llm/envs.py +789 -0
  212. torchrl/envs/llm/libs/README.md +3 -0
  213. torchrl/envs/llm/libs/__init__.py +8 -0
  214. torchrl/envs/llm/libs/mlgym.py +869 -0
  215. torchrl/envs/llm/reward/__init__.py +10 -0
  216. torchrl/envs/llm/reward/gsm8k.py +324 -0
  217. torchrl/envs/llm/reward/ifeval/README.md +13 -0
  218. torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
  219. torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
  220. torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
  221. torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
  222. torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
  223. torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
  224. torchrl/envs/llm/transforms/__init__.py +55 -0
  225. torchrl/envs/llm/transforms/browser.py +292 -0
  226. torchrl/envs/llm/transforms/dataloading.py +859 -0
  227. torchrl/envs/llm/transforms/format.py +73 -0
  228. torchrl/envs/llm/transforms/kl.py +1544 -0
  229. torchrl/envs/llm/transforms/policy_version.py +189 -0
  230. torchrl/envs/llm/transforms/reason.py +323 -0
  231. torchrl/envs/llm/transforms/tokenizer.py +321 -0
  232. torchrl/envs/llm/transforms/tools.py +1955 -0
  233. torchrl/envs/model_based/__init__.py +9 -0
  234. torchrl/envs/model_based/common.py +180 -0
  235. torchrl/envs/model_based/dreamer.py +112 -0
  236. torchrl/envs/transforms/__init__.py +147 -0
  237. torchrl/envs/transforms/functional.py +48 -0
  238. torchrl/envs/transforms/gym_transforms.py +203 -0
  239. torchrl/envs/transforms/module.py +341 -0
  240. torchrl/envs/transforms/r3m.py +372 -0
  241. torchrl/envs/transforms/ray_service.py +663 -0
  242. torchrl/envs/transforms/rb_transforms.py +214 -0
  243. torchrl/envs/transforms/transforms.py +11835 -0
  244. torchrl/envs/transforms/utils.py +94 -0
  245. torchrl/envs/transforms/vc1.py +307 -0
  246. torchrl/envs/transforms/vecnorm.py +845 -0
  247. torchrl/envs/transforms/vip.py +407 -0
  248. torchrl/envs/utils.py +1718 -0
  249. torchrl/envs/vec_envs.py +11 -0
  250. torchrl/modules/__init__.py +206 -0
  251. torchrl/modules/distributions/__init__.py +73 -0
  252. torchrl/modules/distributions/continuous.py +830 -0
  253. torchrl/modules/distributions/discrete.py +908 -0
  254. torchrl/modules/distributions/truncated_normal.py +187 -0
  255. torchrl/modules/distributions/utils.py +233 -0
  256. torchrl/modules/llm/__init__.py +62 -0
  257. torchrl/modules/llm/backends/__init__.py +65 -0
  258. torchrl/modules/llm/backends/vllm/__init__.py +94 -0
  259. torchrl/modules/llm/backends/vllm/_models.py +46 -0
  260. torchrl/modules/llm/backends/vllm/base.py +72 -0
  261. torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
  262. torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
  263. torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
  264. torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
  265. torchrl/modules/llm/policies/__init__.py +28 -0
  266. torchrl/modules/llm/policies/common.py +1809 -0
  267. torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
  268. torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
  269. torchrl/modules/llm/utils.py +23 -0
  270. torchrl/modules/mcts/__init__.py +21 -0
  271. torchrl/modules/mcts/scores.py +579 -0
  272. torchrl/modules/models/__init__.py +86 -0
  273. torchrl/modules/models/batchrenorm.py +119 -0
  274. torchrl/modules/models/decision_transformer.py +179 -0
  275. torchrl/modules/models/exploration.py +731 -0
  276. torchrl/modules/models/llm.py +156 -0
  277. torchrl/modules/models/model_based.py +596 -0
  278. torchrl/modules/models/models.py +1712 -0
  279. torchrl/modules/models/multiagent.py +1067 -0
  280. torchrl/modules/models/recipes/impala.py +185 -0
  281. torchrl/modules/models/utils.py +162 -0
  282. torchrl/modules/planners/__init__.py +10 -0
  283. torchrl/modules/planners/cem.py +228 -0
  284. torchrl/modules/planners/common.py +73 -0
  285. torchrl/modules/planners/mppi.py +265 -0
  286. torchrl/modules/tensordict_module/__init__.py +89 -0
  287. torchrl/modules/tensordict_module/actors.py +2457 -0
  288. torchrl/modules/tensordict_module/common.py +529 -0
  289. torchrl/modules/tensordict_module/exploration.py +814 -0
  290. torchrl/modules/tensordict_module/probabilistic.py +321 -0
  291. torchrl/modules/tensordict_module/rnn.py +1639 -0
  292. torchrl/modules/tensordict_module/sequence.py +132 -0
  293. torchrl/modules/tensordict_module/world_models.py +34 -0
  294. torchrl/modules/utils/__init__.py +38 -0
  295. torchrl/modules/utils/mappings.py +9 -0
  296. torchrl/modules/utils/utils.py +89 -0
  297. torchrl/objectives/__init__.py +78 -0
  298. torchrl/objectives/a2c.py +659 -0
  299. torchrl/objectives/common.py +753 -0
  300. torchrl/objectives/cql.py +1346 -0
  301. torchrl/objectives/crossq.py +710 -0
  302. torchrl/objectives/ddpg.py +453 -0
  303. torchrl/objectives/decision_transformer.py +371 -0
  304. torchrl/objectives/deprecated.py +516 -0
  305. torchrl/objectives/dqn.py +683 -0
  306. torchrl/objectives/dreamer.py +488 -0
  307. torchrl/objectives/functional.py +48 -0
  308. torchrl/objectives/gail.py +258 -0
  309. torchrl/objectives/iql.py +996 -0
  310. torchrl/objectives/llm/__init__.py +30 -0
  311. torchrl/objectives/llm/grpo.py +846 -0
  312. torchrl/objectives/llm/sft.py +482 -0
  313. torchrl/objectives/multiagent/__init__.py +8 -0
  314. torchrl/objectives/multiagent/qmixer.py +396 -0
  315. torchrl/objectives/ppo.py +1669 -0
  316. torchrl/objectives/redq.py +683 -0
  317. torchrl/objectives/reinforce.py +530 -0
  318. torchrl/objectives/sac.py +1580 -0
  319. torchrl/objectives/td3.py +570 -0
  320. torchrl/objectives/td3_bc.py +625 -0
  321. torchrl/objectives/utils.py +782 -0
  322. torchrl/objectives/value/__init__.py +28 -0
  323. torchrl/objectives/value/advantages.py +1956 -0
  324. torchrl/objectives/value/functional.py +1459 -0
  325. torchrl/objectives/value/utils.py +360 -0
  326. torchrl/record/__init__.py +17 -0
  327. torchrl/record/loggers/__init__.py +23 -0
  328. torchrl/record/loggers/common.py +48 -0
  329. torchrl/record/loggers/csv.py +226 -0
  330. torchrl/record/loggers/mlflow.py +142 -0
  331. torchrl/record/loggers/tensorboard.py +139 -0
  332. torchrl/record/loggers/trackio.py +163 -0
  333. torchrl/record/loggers/utils.py +78 -0
  334. torchrl/record/loggers/wandb.py +214 -0
  335. torchrl/record/recorder.py +554 -0
  336. torchrl/services/__init__.py +79 -0
  337. torchrl/services/base.py +109 -0
  338. torchrl/services/ray_service.py +453 -0
  339. torchrl/testing/__init__.py +107 -0
  340. torchrl/testing/assertions.py +179 -0
  341. torchrl/testing/dist_utils.py +122 -0
  342. torchrl/testing/env_creators.py +227 -0
  343. torchrl/testing/env_helper.py +35 -0
  344. torchrl/testing/gym_helpers.py +156 -0
  345. torchrl/testing/llm_mocks.py +119 -0
  346. torchrl/testing/mocking_classes.py +2720 -0
  347. torchrl/testing/modules.py +295 -0
  348. torchrl/testing/mp_helpers.py +15 -0
  349. torchrl/testing/ray_helpers.py +293 -0
  350. torchrl/testing/utils.py +190 -0
  351. torchrl/trainers/__init__.py +42 -0
  352. torchrl/trainers/algorithms/__init__.py +11 -0
  353. torchrl/trainers/algorithms/configs/__init__.py +705 -0
  354. torchrl/trainers/algorithms/configs/collectors.py +216 -0
  355. torchrl/trainers/algorithms/configs/common.py +41 -0
  356. torchrl/trainers/algorithms/configs/data.py +308 -0
  357. torchrl/trainers/algorithms/configs/envs.py +104 -0
  358. torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
  359. torchrl/trainers/algorithms/configs/logging.py +80 -0
  360. torchrl/trainers/algorithms/configs/modules.py +570 -0
  361. torchrl/trainers/algorithms/configs/objectives.py +177 -0
  362. torchrl/trainers/algorithms/configs/trainers.py +340 -0
  363. torchrl/trainers/algorithms/configs/transforms.py +955 -0
  364. torchrl/trainers/algorithms/configs/utils.py +252 -0
  365. torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
  366. torchrl/trainers/algorithms/configs/weight_update.py +159 -0
  367. torchrl/trainers/algorithms/ppo.py +373 -0
  368. torchrl/trainers/algorithms/sac.py +308 -0
  369. torchrl/trainers/helpers/__init__.py +40 -0
  370. torchrl/trainers/helpers/collectors.py +416 -0
  371. torchrl/trainers/helpers/envs.py +573 -0
  372. torchrl/trainers/helpers/logger.py +33 -0
  373. torchrl/trainers/helpers/losses.py +132 -0
  374. torchrl/trainers/helpers/models.py +658 -0
  375. torchrl/trainers/helpers/replay_buffer.py +59 -0
  376. torchrl/trainers/helpers/trainers.py +301 -0
  377. torchrl/trainers/trainers.py +2052 -0
  378. torchrl/weight_update/__init__.py +33 -0
  379. torchrl/weight_update/_distributed.py +749 -0
  380. torchrl/weight_update/_mp.py +624 -0
  381. torchrl/weight_update/_noupdate.py +102 -0
  382. torchrl/weight_update/_ray.py +1032 -0
  383. torchrl/weight_update/_rpc.py +284 -0
  384. torchrl/weight_update/_shared.py +891 -0
  385. torchrl/weight_update/llm/__init__.py +32 -0
  386. torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
  387. torchrl/weight_update/llm/vllm_nccl.py +710 -0
  388. torchrl/weight_update/utils.py +73 -0
  389. torchrl/weight_update/weight_sync_schemes.py +1244 -0
  390. torchrl-0.11.0.dist-info/METADATA +1308 -0
  391. torchrl-0.11.0.dist-info/RECORD +395 -0
  392. torchrl-0.11.0.dist-info/WHEEL +5 -0
  393. torchrl-0.11.0.dist-info/entry_points.txt +2 -0
  394. torchrl-0.11.0.dist-info/licenses/LICENSE +21 -0
  395. torchrl-0.11.0.dist-info/top_level.txt +7 -0
@@ -0,0 +1,813 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from __future__ import annotations
6
+
7
+ import importlib.util
8
+
9
+ import torch
10
+ from tensordict import LazyStackedTensorDict, TensorDict, TensorDictBase
11
+
12
+ from torchrl.data.tensor_specs import (
13
+ Bounded,
14
+ Categorical,
15
+ Composite,
16
+ DEVICE_TYPING,
17
+ MultiCategorical,
18
+ MultiOneHot,
19
+ OneHot,
20
+ StackedComposite,
21
+ TensorSpec,
22
+ Unbounded,
23
+ )
24
+ from torchrl.data.utils import numpy_to_torch_dtype_dict
25
+ from torchrl.envs.common import _EnvWrapper, EnvBase
26
+ from torchrl.envs.libs.gym import gym_backend, set_gym_backend
27
+ from torchrl.envs.utils import (
28
+ _classproperty,
29
+ _selective_unsqueeze,
30
+ check_marl_grouping,
31
+ MarlGroupMapType,
32
+ )
33
+
34
# Detect at import time whether the optional `vmas` dependency is installed,
# without actually importing it (the import is deferred to first use).
_has_vmas = importlib.util.find_spec("vmas") is not None


# Public API of this module.
__all__ = ["VmasWrapper", "VmasEnv"]
38
+
39
+
40
def _get_envs():
    """Return every scenario shipped with VMAS (core, MPE and debug ones)."""
    if not _has_vmas:
        raise ImportError("VMAS is not installed in your virtual environment.")
    import vmas

    return vmas.scenarios + vmas.mpe_scenarios + vmas.debug_scenarios
48
+
49
+
50
@set_gym_backend("gym")
def _vmas_to_torchrl_spec_transform(
    spec,
    device,
    categorical_action_encoding,
) -> TensorSpec:
    """Translate a vmas/gym space into the equivalent torchrl ``TensorSpec``.

    Discrete spaces map to ``Categorical``/``OneHot`` (and their multi-dim
    variants) depending on ``categorical_action_encoding``; ``Box`` maps to
    ``Bounded``/``Unbounded``; ``Dict`` recurses into a ``Composite``.
    """
    gym_spaces = gym_backend("spaces")

    if isinstance(spec, gym_spaces.discrete.Discrete):
        if categorical_action_encoding:
            spec_cls, dtype = Categorical, numpy_to_torch_dtype_dict[spec.dtype]
        else:
            spec_cls, dtype = OneHot, torch.long
        return spec_cls(spec.n, device=device, dtype=dtype)

    if isinstance(spec, gym_spaces.multi_discrete.MultiDiscrete):
        if categorical_action_encoding:
            return MultiCategorical(
                spec.nvec, device=device, dtype=numpy_to_torch_dtype_dict[spec.dtype]
            )
        return MultiOneHot(spec.nvec, device=device, dtype=torch.long)

    if isinstance(spec, gym_spaces.Box):
        # Scalar boxes are promoted to shape (1,).
        shape = spec.shape if len(spec.shape) else torch.Size([1])
        dtype = numpy_to_torch_dtype_dict[spec.dtype]
        low = torch.tensor(spec.low, device=device, dtype=dtype)
        high = torch.tensor(spec.high, device=device, dtype=dtype)
        if low.isinf().all() and high.isinf().all():
            return Unbounded(shape, device=device, dtype=dtype)
        return Bounded(low, high, shape, dtype=dtype, device=device)

    if isinstance(spec, gym_spaces.Dict):
        sub_specs = {
            key: _vmas_to_torchrl_spec_transform(
                spec[key],
                device=device,
                categorical_action_encoding=categorical_action_encoding,
            )
            for key in spec.keys()
        }
        # the batch-size must be set later
        return Composite(sub_specs, device=device)

    raise NotImplementedError(
        f"spec of type {type(spec).__name__} is currently unaccounted for vmas"
    )
109
+
110
+
111
+ class VmasWrapper(_EnvWrapper):
112
+ """Vmas environment wrapper.
113
+
114
+ GitHub: https://github.com/proroklab/VectorizedMultiAgentSimulator
115
+
116
+ Paper: https://arxiv.org/abs/2207.03530
117
+
118
+ Args:
119
+ env (``vmas.simulator.environment.environment.Environment``): the vmas environment to wrap.
120
+
121
+ Keyword Args:
122
+ num_envs (int): Number of vectorized simulation environments. VMAS performs vectorized simulations using PyTorch.
123
+ This argument indicates the number of vectorized environments that should be simulated in a batch. It will also
124
+ determine the batch size of the environment.
125
+ device (torch.device, optional): Device for simulation. Defaults to the default device. All the tensors created by VMAS
126
+ will be placed on this device.
127
+ continuous_actions (bool, optional): Whether to use continuous actions. Defaults to ``True``. If ``False``, actions
128
+ will be discrete. The number of actions and their size will depend on the chosen scenario.
129
+ See the VMAS repository for more info.
130
+ max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon). Each VMAS scenario can
131
+ be terminating or not. If ``max_steps`` is specified,
132
+ the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached.
133
+ Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`,
134
+ this argument will not set the ``"truncated"`` entry in the tensordict.
135
+ categorical_actions (bool, optional): if the environment actions are discrete, whether to transform
136
+ them to categorical or one-hot. Defaults to ``True``.
137
+ group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for
138
+ input/output. By default, if the agent names follow the ``"<name>_<int>"``
139
+ convention, they will be grouped by ``"<name>"``. If they do not follow this convention, they will be all put
140
+ in one group named ``"agents"``.
141
+ Otherwise, a group map can be specified or selected from some premade options.
142
+ See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
143
+
144
+ Attributes:
145
+ group_map (Dict[str, List[str]]): how to group agents in tensordicts for
146
+ input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
147
+ agent_names (list of str): names of the agent in the environment
148
+ agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment
149
+ full_action_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
150
+ full_observation_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
151
+ full_reward_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
152
+ full_done_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
153
+ het_specs (bool): whether the environment has any lazy spec
154
+ het_specs_map (Dict[str, bool]): dictionary mapping each group to a flag representing of the group has lazy specs
155
+ available_envs (List[str]): the list of the scenarios available to build.
156
+
157
+ .. warning::
158
+ VMAS returns a single ``done`` flag which does not distinguish between
159
+ when the env reached ``max_steps`` and termination.
160
+ If you deem the ``truncation`` signal necessary, set ``max_steps`` to
161
+ ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform.
162
+
163
+ Examples:
164
+ >>> env = VmasWrapper(
165
+ ... vmas.make_env(
166
+ ... scenario="flocking",
167
+ ... num_envs=32,
168
+ ... continuous_actions=True,
169
+ ... max_steps=200,
170
+ ... device="cpu",
171
+ ... seed=None,
172
+ ... # Scenario kwargs
173
+ ... n_agents=5,
174
+ ... )
175
+ ... )
176
+ >>> print(env.rollout(10))
177
+ TensorDict(
178
+ fields={
179
+ agents: TensorDict(
180
+ fields={
181
+ action: Tensor(shape=torch.Size([32, 10, 5, 2]), device=cpu, dtype=torch.float32, is_shared=False),
182
+ info: TensorDict(
183
+ fields={
184
+ agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
185
+ agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
186
+ batch_size=torch.Size([32, 10, 5]),
187
+ device=cpu,
188
+ is_shared=False),
189
+ observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False)},
190
+ batch_size=torch.Size([32, 10, 5]),
191
+ device=cpu,
192
+ is_shared=False),
193
+ done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
194
+ next: TensorDict(
195
+ fields={
196
+ agents: TensorDict(
197
+ fields={
198
+ info: TensorDict(
199
+ fields={
200
+ agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
201
+ agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
202
+ batch_size=torch.Size([32, 10, 5]),
203
+ device=cpu,
204
+ is_shared=False),
205
+ observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False),
206
+ reward: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
207
+ batch_size=torch.Size([32, 10, 5]),
208
+ device=cpu,
209
+ is_shared=False),
210
+ done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
211
+ terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
212
+ batch_size=torch.Size([32, 10]),
213
+ device=cpu,
214
+ is_shared=False),
215
+ terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
216
+ batch_size=torch.Size([32, 10]),
217
+ device=cpu,
218
+ is_shared=False)
219
+ """
220
+
221
+ git_url = "https://github.com/proroklab/VectorizedMultiAgentSimulator"
222
+ libname = "vmas"
223
+
224
    @property
    def lib(self):
        # The backing simulator module, imported lazily so that torchrl can be
        # imported even when vmas is not installed.
        import vmas

        return vmas
229
+
230
+ @_classproperty
231
+ def available_envs(cls):
232
+ if not _has_vmas:
233
+ return []
234
+ return list(_get_envs())
235
+
236
+ def __init__(
237
+ self,
238
+ env: vmas.simulator.environment.environment.Environment = None, # noqa
239
+ categorical_actions: bool = True,
240
+ group_map: MarlGroupMapType | dict[str, list[str]] | None = None,
241
+ **kwargs,
242
+ ):
243
+ if env is not None:
244
+ kwargs["env"] = env
245
+ if "device" in kwargs.keys() and kwargs["device"] != str(env.device):
246
+ raise TypeError("Env device is different from vmas device")
247
+ kwargs["device"] = str(env.device)
248
+ self.group_map = group_map
249
+ self.categorical_actions = categorical_actions
250
+ super().__init__(**kwargs, allow_done_after_reset=True)
251
+
252
+ def _build_env(
253
+ self,
254
+ env: vmas.simulator.environment.environment.Environment, # noqa
255
+ from_pixels: bool = False,
256
+ pixels_only: bool = False,
257
+ ):
258
+ self.from_pixels = from_pixels
259
+ self.pixels_only = pixels_only
260
+
261
+ # TODO pixels
262
+ if self.from_pixels:
263
+ raise NotImplementedError("vmas rendering not yet implemented")
264
+
265
+ # Adjust batch size
266
+ if len(self.batch_size) == 0:
267
+ # Batch size not set
268
+ self.batch_size = torch.Size((env.num_envs,))
269
+ elif len(self.batch_size) == 1:
270
+ # Batch size is set
271
+ if not self.batch_size[0] == env.num_envs:
272
+ raise TypeError(
273
+ "Batch size used in constructor does not match vmas batch size."
274
+ )
275
+ else:
276
+ raise TypeError(
277
+ "Batch size used in constructor is not compatible with vmas."
278
+ )
279
+
280
+ return env
281
+
282
+ def _get_default_group_map(self, agent_names: list[str]):
283
+ # This function performs the default grouping in vmas.
284
+ # Agents with names "<name>_<int>" will be grouped in group name "<name>".
285
+ # If any of the agents does not follow the naming convention, we fall back
286
+ # back on having all agents in one group named "agents".
287
+ group_map = {}
288
+ follows_convention = True
289
+ for agent_name in agent_names:
290
+ # See if the agent follows the convention "<name>_<int>"
291
+ agent_name_split = agent_name.split("_")
292
+ if len(agent_name_split) == 1:
293
+ follows_convention = False
294
+ follows_convention = follows_convention and agent_name_split[-1].isdigit()
295
+
296
+ if not follows_convention:
297
+ break
298
+
299
+ # Group it with other agents that follow the same convention
300
+ group_name = "_".join(agent_name_split[:-1])
301
+ if group_name in group_map:
302
+ group_map[group_name].append(agent_name)
303
+ else:
304
+ group_map[group_name] = [agent_name]
305
+
306
+ if not follows_convention:
307
+ group_map = MarlGroupMapType.ALL_IN_ONE_GROUP.get_group_map(agent_names)
308
+
309
+ # For BC-compatibility rename the "agent" group to "agents"
310
+ if "agent" in group_map and len(group_map) == 1:
311
+ agent_group = group_map["agent"]
312
+ group_map["agents"] = agent_group
313
+ del group_map["agent"]
314
+ return group_map
315
+
316
    def _make_specs(
        self,
        env: vmas.simulator.environment.environment.Environment,  # noqa
    ) -> None:
        """Build the unbatched action/observation/reward/done specs.

        Specs are grouped according to ``self.group_map`` and stored on the
        ``full_*_spec_unbatched`` attributes (the vectorized dimension is
        not part of these specs).
        """
        # Create and check group map
        self.agent_names = [agent.name for agent in self.agents]
        self.agent_names_to_indices_map = {
            agent.name: i for i, agent in enumerate(self.agents)
        }
        if self.group_map is None:
            # No user-provided grouping: derive it from the agent names.
            self.group_map = self._get_default_group_map(self.agent_names)
        elif isinstance(self.group_map, MarlGroupMapType):
            self.group_map = self.group_map.get_group_map(self.agent_names)
        check_marl_grouping(self.group_map, self.agent_names)

        full_action_spec_unbatched = Composite(device=self.device)
        full_observation_spec_unbatched = Composite(device=self.device)
        full_reward_spec_unbatched = Composite(device=self.device)

        # Track which groups have heterogeneous (lazily stacked) specs.
        self.het_specs = False
        self.het_specs_map = {}
        for group in self.group_map.keys():
            (
                group_observation_spec,
                group_action_spec,
                group_reward_spec,
                group_info_spec,
            ) = self._make_unbatched_group_specs(group)
            full_action_spec_unbatched[group] = group_action_spec
            full_observation_spec_unbatched[group] = group_observation_spec
            full_reward_spec_unbatched[group] = group_reward_spec
            if group_info_spec is not None:
                # Info (when present) lives under the observation spec.
                full_observation_spec_unbatched[(group, "info")] = group_info_spec
            # A StackedComposite marks agents in the group whose specs could
            # not be densified into a single homogeneous spec.
            group_het_specs = isinstance(
                group_observation_spec, StackedComposite
            ) or isinstance(group_action_spec, StackedComposite)
            self.het_specs_map[group] = group_het_specs
            self.het_specs = self.het_specs or group_het_specs

        # Single shared done flag (vmas reports one done per vector env).
        full_done_spec_unbatched = Composite(
            {
                "done": Categorical(
                    n=2,
                    shape=torch.Size((1,)),
                    dtype=torch.bool,
                    device=self.device,
                ),
            },
        )

        self.full_action_spec_unbatched = full_action_spec_unbatched
        self.full_observation_spec_unbatched = full_observation_spec_unbatched
        self.full_reward_spec_unbatched = full_reward_spec_unbatched
        self.full_done_spec_unbatched = full_done_spec_unbatched
370
+
371
    def _make_unbatched_group_specs(self, group: str):
        """Build the (observation, action, reward, info) specs for one group.

        Per-agent specs are created and stacked along a leading ``n_agents``
        dimension; the stack may be lazy when agents in the group have
        heterogeneous spec shapes.

        Returns:
            a tuple ``(group_observation_spec, group_action_spec,
            group_reward_spec, group_info_spec)`` where ``group_info_spec``
            is ``None`` when no agent in the group reports info.
        """
        # Agent specs
        action_specs = []
        observation_specs = []
        reward_specs = []
        info_specs = []
        for agent_name in self.group_map[group]:
            agent_index = self.agent_names_to_indices_map[agent_name]
            agent = self.agents[agent_index]
            action_specs.append(
                Composite(
                    {
                        "action": _vmas_to_torchrl_spec_transform(
                            self.action_space[agent_index],
                            categorical_action_encoding=self.categorical_actions,
                            device=self.device,
                        )  # shape = (n_actions_per_agent,)
                    },
                )
            )
            observation_specs.append(
                Composite(
                    {
                        "observation": _vmas_to_torchrl_spec_transform(
                            self.observation_space[agent_index],
                            device=self.device,
                            categorical_action_encoding=self.categorical_actions,
                        )  # shape = (n_obs_per_agent,)
                    },
                )
            )
            reward_specs.append(
                Composite(
                    {
                        "reward": Unbounded(
                            shape=torch.Size((1,)),
                            device=self.device,
                        )  # shape = (1,)
                    }
                )
            )
            # Infer the info spec from a sample provided by the scenario;
            # the batch dimension is stripped off the sampled shape.
            agent_info = self.scenario.info(agent)
            if len(agent_info):
                info_specs.append(
                    Composite(
                        {
                            key: Unbounded(
                                shape=_selective_unsqueeze(
                                    value, batch_size=self.batch_size
                                ).shape[1:],
                                device=self.device,
                                dtype=torch.float32,
                            )
                            for key, value in agent_info.items()
                        },
                    ).to(self.device)
                )

        # Create multi-agent specs
        group_action_spec = torch.stack(
            action_specs, dim=0
        )  # shape = (n_agents, n_actions_per_agent)
        group_observation_spec = torch.stack(
            observation_specs, dim=0
        )  # shape = (n_agents, n_obs_per_agent)
        group_reward_spec = torch.stack(reward_specs, dim=0)  # shape = (n_agents, 1)
        group_info_spec = None
        if len(info_specs):
            group_info_spec = torch.stack(info_specs, dim=0)

        return (
            group_observation_spec,
            group_action_spec,
            group_reward_spec,
            group_info_spec,
        )
447
+
448
+ def _check_kwargs(self, kwargs: dict):
449
+ vmas = self.lib
450
+
451
+ if "env" not in kwargs:
452
+ raise TypeError("Could not find environment key 'env' in kwargs.")
453
+ env = kwargs["env"]
454
+ if not isinstance(env, vmas.simulator.environment.Environment):
455
+ raise TypeError(
456
+ "env is not of type 'vmas.simulator.environment.Environment'."
457
+ )
458
+
459
+ def _init_env(self) -> int | None:
460
+ pass
461
+
462
+ def _set_seed(self, seed: int | None) -> None:
463
+ self._env.seed(seed)
464
+
465
+ def _reset(
466
+ self, tensordict: TensorDictBase | None = None, **kwargs
467
+ ) -> TensorDictBase:
468
+ if tensordict is not None and "_reset" in tensordict.keys():
469
+ _reset = tensordict.get("_reset")
470
+ envs_to_reset = _reset.squeeze(-1)
471
+ if envs_to_reset.all():
472
+ self._env.reset(return_observations=False)
473
+ else:
474
+ for env_index, to_reset in enumerate(envs_to_reset):
475
+ if to_reset:
476
+ self._env.reset_at(env_index, return_observations=False)
477
+ else:
478
+ self._env.reset(return_observations=False)
479
+
480
+ obs, dones, infos = self._env.get_from_scenario(
481
+ get_observations=True,
482
+ get_infos=True,
483
+ get_rewards=False,
484
+ get_dones=True,
485
+ )
486
+ dones = self.read_done(dones)
487
+
488
+ source = {"done": dones, "terminated": dones.clone()}
489
+ for group, agent_names in self.group_map.items():
490
+ agent_tds = []
491
+ for agent_name in agent_names:
492
+ i = self.agent_names_to_indices_map[agent_name]
493
+
494
+ agent_obs = self.read_obs(obs[i])
495
+ agent_info = self.read_info(infos[i])
496
+ agent_td = TensorDict(
497
+ source={
498
+ "observation": agent_obs,
499
+ },
500
+ batch_size=self.batch_size,
501
+ device=self.device,
502
+ )
503
+ if agent_info is not None:
504
+ agent_td.set("info", agent_info)
505
+ agent_tds.append(agent_td)
506
+
507
+ agent_tds = LazyStackedTensorDict.maybe_dense_stack(agent_tds, dim=1)
508
+ if not self.het_specs_map[group]:
509
+ agent_tds = agent_tds.to_tensordict()
510
+ source.update({group: agent_tds})
511
+
512
+ tensordict_out = TensorDict(
513
+ source=source,
514
+ batch_size=self.batch_size,
515
+ device=self.device,
516
+ )
517
+ return tensordict_out
518
+
519
    def _step(
        self,
        tensordict: TensorDictBase,
    ) -> TensorDictBase:
        """Step the vmas simulator with the actions stored in ``tensordict``.

        Actions are gathered group by group, re-ordered to the vmas agent
        ordering, and the resulting observations/rewards/dones are written
        back following ``self.group_map``.
        """
        agent_indices = {}
        action_list = []
        n_agents = 0
        for group, agent_names in self.group_map.items():
            group_action = tensordict.get((group, "action"))
            group_action_list = list(self.read_action(group_action, group=group))
            # Record, for each vmas agent index, where its action sits in
            # the flat (group-ordered) action list.
            agent_indices.update(
                {
                    self.agent_names_to_indices_map[agent_name]: i + n_agents
                    for i, agent_name in enumerate(agent_names)
                }
            )
            n_agents += len(agent_names)
            action_list += group_action_list
        # Permute the flat list back to the vmas agent ordering.
        action = [action_list[agent_indices[i]] for i in range(self.n_agents)]

        obs, rews, dones, infos = self._env.step(action)

        dones = self.read_done(dones)

        source = {"done": dones, "terminated": dones.clone()}
        for group, agent_names in self.group_map.items():
            agent_tds = []
            for agent_name in agent_names:
                i = self.agent_names_to_indices_map[agent_name]

                agent_obs = self.read_obs(obs[i])
                agent_rew = self.read_reward(rews[i])
                agent_info = self.read_info(infos[i])

                agent_td = TensorDict(
                    source={
                        "observation": agent_obs,
                        "reward": agent_rew,
                    },
                    batch_size=self.batch_size,
                    device=self.device,
                )
                # "info" is only set when the scenario provides it.
                if agent_info is not None:
                    agent_td.set("info", agent_info)
                agent_tds.append(agent_td)

            agent_tds = LazyStackedTensorDict.maybe_dense_stack(agent_tds, dim=1)
            if not self.het_specs_map[group]:
                # Homogeneous group: densify the lazy stack.
                agent_tds = agent_tds.to_tensordict()
            source.update({group: agent_tds})

        tensordict_out = TensorDict(
            source=source,
            batch_size=self.batch_size,
            device=self.device,
        )
        return tensordict_out
576
+
577
+ def read_obs(self, observations: dict | torch.Tensor) -> dict | torch.Tensor:
578
+ if isinstance(observations, torch.Tensor):
579
+ return _selective_unsqueeze(observations, batch_size=self.batch_size)
580
+ return TensorDict(
581
+ source={key: self.read_obs(value) for key, value in observations.items()},
582
+ batch_size=self.batch_size,
583
+ )
584
+
585
    def read_info(self, infos: dict[str, torch.Tensor]) -> TensorDict | None:
        """Format scenario info entries into a :class:`~tensordict.TensorDict`.

        Returns ``None`` when the scenario provides no info, so that callers
        can skip setting an ``"info"`` entry altogether.
        """
        if len(infos) == 0:
            return None
        infos = TensorDict(
            source={
                # Info values are cast to float32 and expanded to the batch.
                key: _selective_unsqueeze(
                    value.to(torch.float32), batch_size=self.batch_size
                )
                for key, value in infos.items()
            },
            batch_size=self.batch_size,
            device=self.device,
        )

        return infos
600
+
601
+ def read_done(self, done):
602
+ done = _selective_unsqueeze(done, batch_size=self.batch_size)
603
+ return done
604
+
605
+ def read_reward(self, rewards):
606
+ rewards = _selective_unsqueeze(rewards, batch_size=self.batch_size)
607
+ return rewards
608
+
609
+ def read_action(self, action, group: str = "agents"):
610
+ if not self.continuous_actions and not self.categorical_actions:
611
+ action = self.full_action_spec_unbatched[group, "action"].to_categorical(
612
+ action
613
+ )
614
+ agent_actions = action.unbind(dim=1)
615
+ return agent_actions
616
+
617
+ def __repr__(self) -> str:
618
+ return (
619
+ f"{self.__class__.__name__}(num_envs={self.num_envs}, n_agents={self.n_agents},"
620
+ f" batch_size={self.batch_size}, device={self.device})"
621
+ )
622
+
623
+ def to(self, device: DEVICE_TYPING) -> EnvBase:
624
+ self._env.to(device)
625
+ return super().to(device)
626
+
627
+
628
class VmasEnv(VmasWrapper):
    """Vmas environment wrapper.

    GitHub: https://github.com/proroklab/VectorizedMultiAgentSimulator

    Paper: https://arxiv.org/abs/2207.03530

    Args:
        scenario (str or vmas.simulator.scenario.BaseScenario): the vmas scenario to build.
            Must be one of :attr:`~.available_envs`. For a description and rendering of available scenarios see
            `the README <https://github.com/proroklab/VectorizedMultiAgentSimulator/tree/VMAS-1.3.3?tab=readme-ov-file#main-scenarios>`__.


    Keyword Args:
        num_envs (int): Number of vectorized simulation environments. VMAS performs vectorized simulations using PyTorch.
            This argument indicates the number of vectorized environments that should be simulated in a batch. It will also
            determine the batch size of the environment.
        device (torch.device, optional): Device for simulation. Defaults to the default device. All the tensors created by VMAS
            will be placed on this device.
        continuous_actions (bool, optional): Whether to use continuous actions. Defaults to ``True``. If ``False``, actions
            will be discrete. The number of actions and their size will depend on the chosen scenario.
            See the VMAS repository for more info.
        max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon). Each VMAS scenario can
            be terminating or not. If ``max_steps`` is specified,
            the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached.
            Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`,
            this argument will not set the ``"truncated"`` entry in the tensordict.
        categorical_actions (bool, optional): if the environment actions are discrete, whether to transform
            them to categorical or one-hot. Defaults to ``True``.
        group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for
            input/output. By default, if the agent names follow the ``"<name>_<int>"``
            convention, they will be grouped by ``"<name>"``. If they do not follow this convention, they will be all put
            in one group named ``"agents"``.
            Otherwise, a group map can be specified or selected from some premade options.
            See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
        **kwargs (Dict, optional): These are additional arguments that can be passed to the VMAS scenario constructor.
            (e.g., number of agents, reward sparsity). The available arguments will vary based on the chosen scenario.
            To see the available arguments for a specific scenario, see the constructor in its file from
            `the scenario folder <https://github.com/proroklab/VectorizedMultiAgentSimulator/tree/VMAS-1.3.3/vmas/scenarios>`__.


    Attributes:
        group_map (Dict[str, List[str]]): how to group agents in tensordicts for
            input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
        agent_names (list of str): names of the agent in the environment
        agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment
        full_action_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
        full_observation_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
        full_reward_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
        full_done_spec_unbatched (TensorSpec): version of the spec without the vectorized dimension
        het_specs (bool): whether the environment has any lazy spec
        het_specs_map (Dict[str, bool]): dictionary mapping each group to a flag representing of the group has lazy specs
        available_envs (List[str]): the list of the scenarios available to build.

    .. warning::
        VMAS returns a single ``done`` flag which does not distinguish between
        when the env reached ``max_steps`` and termination.
        If you deem the ``truncation`` signal necessary, set ``max_steps`` to
        ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform.

    Examples:
        >>> env = VmasEnv(
        ...     scenario="flocking",
        ...     num_envs=32,
        ...     continuous_actions=True,
        ...     max_steps=200,
        ...     device="cpu",
        ...     seed=None,
        ...     # Scenario kwargs
        ...     n_agents=5,
        ... )
        >>> print(env.rollout(10))
        TensorDict(
            fields={
                agents: TensorDict(
                    fields={
                        action: Tensor(shape=torch.Size([32, 10, 5, 2]), device=cpu, dtype=torch.float32, is_shared=False),
                        info: TensorDict(
                            fields={
                                agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
                                agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
                            batch_size=torch.Size([32, 10, 5]),
                            device=cpu,
                            is_shared=False),
                        observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False)},
                    batch_size=torch.Size([32, 10, 5]),
                    device=cpu,
                    is_shared=False),
                done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
                next: TensorDict(
                    fields={
                        agents: TensorDict(
                            fields={
                                info: TensorDict(
                                    fields={
                                        agent_collision_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False),
                                        agent_distance_rew: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
                                    batch_size=torch.Size([32, 10, 5]),
                                    device=cpu,
                                    is_shared=False),
                                observation: Tensor(shape=torch.Size([32, 10, 5, 18]), device=cpu, dtype=torch.float32, is_shared=False),
                                reward: Tensor(shape=torch.Size([32, 10, 5, 1]), device=cpu, dtype=torch.float32, is_shared=False)},
                            batch_size=torch.Size([32, 10, 5]),
                            device=cpu,
                            is_shared=False),
                        done: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False),
                        terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
                    batch_size=torch.Size([32, 10]),
                    device=cpu,
                    is_shared=False),
                terminated: Tensor(shape=torch.Size([32, 10, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
            batch_size=torch.Size([32, 10]),
            device=cpu,
            is_shared=False)
    """

    def __init__(
        self,
        scenario: str | vmas.simulator.scenario.BaseScenario,  # noqa
        *,
        num_envs: int,
        continuous_actions: bool = True,
        max_steps: int | None = None,
        categorical_actions: bool = True,
        seed: int | None = None,
        group_map: MarlGroupMapType | dict[str, list[str]] | None = None,
        **kwargs,
    ):
        # Fail early with a helpful message when vmas is missing.
        if not _has_vmas:
            raise ImportError(
                f"vmas python package was not found. Please install this dependency. "
                f"More info: {self.git_url}."
            )
        super().__init__(
            scenario=scenario,
            num_envs=num_envs,
            continuous_actions=continuous_actions,
            max_steps=max_steps,
            seed=seed,
            categorical_actions=categorical_actions,
            group_map=group_map,
            **kwargs,
        )

    def _check_kwargs(self, kwargs: dict):
        # Unlike the wrapper, the env builder requires a scenario name and a
        # number of vectorized environments rather than a prebuilt env.
        if "scenario" not in kwargs:
            raise TypeError("Could not find environment key 'scenario' in kwargs.")
        if "num_envs" not in kwargs:
            raise TypeError("Could not find environment key 'num_envs' in kwargs.")

    def _build_env(
        self,
        scenario: str | vmas.simulator.scenario.BaseScenario,  # noqa
        num_envs: int,
        continuous_actions: bool,
        max_steps: int | None,
        seed: int | None,
        **scenario_kwargs,
    ) -> vmas.simulator.environment.environment.Environment:  # noqa
        """Build the vmas environment and delegate validation to the wrapper."""
        vmas = self.lib

        self.scenario_name = scenario
        # Rendering flags are handled by the wrapper, not the scenario.
        from_pixels = scenario_kwargs.pop("from_pixels", False)
        pixels_only = scenario_kwargs.pop("pixels_only", False)

        return super()._build_env(
            env=vmas.make_env(
                scenario=scenario,
                num_envs=num_envs,
                # Fall back to torch's default device; older torch versions
                # without ``get_default_device`` default to CPU.
                device=self.device
                if self.device is not None
                else getattr(
                    torch, "get_default_device", lambda: torch.device("cpu")
                )(),
                continuous_actions=continuous_actions,
                max_steps=max_steps,
                seed=seed,
                wrapper=None,
                **scenario_kwargs,
            ),
            pixels_only=pixels_only,
            from_pixels=from_pixels,
        )

    def __repr__(self):
        # Append the scenario name to the wrapper repr.
        return f"{super().__repr__()} (scenario={self.scenario_name})"