torchrl 0.11.0__cp314-cp314-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (394)
  1. benchmarks/benchmark_batched_envs.py +104 -0
  2. benchmarks/conftest.py +91 -0
  3. benchmarks/ecosystem/gym_env_throughput.py +321 -0
  4. benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
  5. benchmarks/requirements.txt +7 -0
  6. benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
  7. benchmarks/test_collectors_benchmark.py +240 -0
  8. benchmarks/test_compressed_storage_benchmark.py +145 -0
  9. benchmarks/test_envs_benchmark.py +133 -0
  10. benchmarks/test_llm.py +101 -0
  11. benchmarks/test_non_tensor_env_benchmark.py +70 -0
  12. benchmarks/test_objectives_benchmarks.py +1199 -0
  13. benchmarks/test_replaybuffer_benchmark.py +254 -0
  14. sota-check/README.md +35 -0
  15. sota-implementations/README.md +142 -0
  16. sota-implementations/a2c/README.md +39 -0
  17. sota-implementations/a2c/a2c_atari.py +291 -0
  18. sota-implementations/a2c/a2c_mujoco.py +273 -0
  19. sota-implementations/a2c/utils_atari.py +240 -0
  20. sota-implementations/a2c/utils_mujoco.py +160 -0
  21. sota-implementations/bandits/README.md +7 -0
  22. sota-implementations/bandits/dqn.py +126 -0
  23. sota-implementations/cql/cql_offline.py +198 -0
  24. sota-implementations/cql/cql_online.py +249 -0
  25. sota-implementations/cql/discrete_cql_offline.py +180 -0
  26. sota-implementations/cql/discrete_cql_online.py +227 -0
  27. sota-implementations/cql/utils.py +471 -0
  28. sota-implementations/crossq/crossq.py +271 -0
  29. sota-implementations/crossq/utils.py +320 -0
  30. sota-implementations/ddpg/ddpg.py +231 -0
  31. sota-implementations/ddpg/utils.py +325 -0
  32. sota-implementations/decision_transformer/dt.py +163 -0
  33. sota-implementations/decision_transformer/lamb.py +167 -0
  34. sota-implementations/decision_transformer/online_dt.py +178 -0
  35. sota-implementations/decision_transformer/utils.py +562 -0
  36. sota-implementations/discrete_sac/discrete_sac.py +243 -0
  37. sota-implementations/discrete_sac/utils.py +324 -0
  38. sota-implementations/dqn/README.md +30 -0
  39. sota-implementations/dqn/dqn_atari.py +272 -0
  40. sota-implementations/dqn/dqn_cartpole.py +236 -0
  41. sota-implementations/dqn/utils_atari.py +132 -0
  42. sota-implementations/dqn/utils_cartpole.py +90 -0
  43. sota-implementations/dreamer/README.md +129 -0
  44. sota-implementations/dreamer/dreamer.py +586 -0
  45. sota-implementations/dreamer/dreamer_utils.py +1107 -0
  46. sota-implementations/expert-iteration/README.md +352 -0
  47. sota-implementations/expert-iteration/ei_utils.py +770 -0
  48. sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
  49. sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
  50. sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
  51. sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
  52. sota-implementations/gail/gail.py +327 -0
  53. sota-implementations/gail/gail_utils.py +68 -0
  54. sota-implementations/gail/ppo_utils.py +157 -0
  55. sota-implementations/grpo/README.md +273 -0
  56. sota-implementations/grpo/grpo-async.py +437 -0
  57. sota-implementations/grpo/grpo-sync.py +435 -0
  58. sota-implementations/grpo/grpo_utils.py +843 -0
  59. sota-implementations/grpo/requirements_gsm8k.txt +11 -0
  60. sota-implementations/grpo/requirements_ifeval.txt +16 -0
  61. sota-implementations/impala/README.md +33 -0
  62. sota-implementations/impala/impala_multi_node_ray.py +292 -0
  63. sota-implementations/impala/impala_multi_node_submitit.py +284 -0
  64. sota-implementations/impala/impala_single_node.py +261 -0
  65. sota-implementations/impala/utils.py +184 -0
  66. sota-implementations/iql/discrete_iql.py +230 -0
  67. sota-implementations/iql/iql_offline.py +164 -0
  68. sota-implementations/iql/iql_online.py +225 -0
  69. sota-implementations/iql/utils.py +437 -0
  70. sota-implementations/multiagent/README.md +74 -0
  71. sota-implementations/multiagent/iql.py +237 -0
  72. sota-implementations/multiagent/maddpg_iddpg.py +266 -0
  73. sota-implementations/multiagent/mappo_ippo.py +267 -0
  74. sota-implementations/multiagent/qmix_vdn.py +271 -0
  75. sota-implementations/multiagent/sac.py +337 -0
  76. sota-implementations/multiagent/utils/__init__.py +4 -0
  77. sota-implementations/multiagent/utils/logging.py +151 -0
  78. sota-implementations/multiagent/utils/utils.py +43 -0
  79. sota-implementations/ppo/README.md +29 -0
  80. sota-implementations/ppo/ppo_atari.py +305 -0
  81. sota-implementations/ppo/ppo_mujoco.py +293 -0
  82. sota-implementations/ppo/utils_atari.py +238 -0
  83. sota-implementations/ppo/utils_mujoco.py +152 -0
  84. sota-implementations/ppo_trainer/train.py +21 -0
  85. sota-implementations/redq/README.md +7 -0
  86. sota-implementations/redq/redq.py +199 -0
  87. sota-implementations/redq/utils.py +1060 -0
  88. sota-implementations/sac/sac-async.py +266 -0
  89. sota-implementations/sac/sac.py +239 -0
  90. sota-implementations/sac/utils.py +381 -0
  91. sota-implementations/sac_trainer/train.py +16 -0
  92. sota-implementations/td3/td3.py +254 -0
  93. sota-implementations/td3/utils.py +319 -0
  94. sota-implementations/td3_bc/td3_bc.py +177 -0
  95. sota-implementations/td3_bc/utils.py +251 -0
  96. torchrl/__init__.py +144 -0
  97. torchrl/_extension.py +74 -0
  98. torchrl/_torchrl.cpython-314-aarch64-linux-gnu.so +0 -0
  99. torchrl/_utils.py +1431 -0
  100. torchrl/collectors/__init__.py +48 -0
  101. torchrl/collectors/_base.py +1058 -0
  102. torchrl/collectors/_constants.py +88 -0
  103. torchrl/collectors/_multi_async.py +324 -0
  104. torchrl/collectors/_multi_base.py +1805 -0
  105. torchrl/collectors/_multi_sync.py +464 -0
  106. torchrl/collectors/_runner.py +581 -0
  107. torchrl/collectors/_single.py +2009 -0
  108. torchrl/collectors/_single_async.py +259 -0
  109. torchrl/collectors/collectors.py +62 -0
  110. torchrl/collectors/distributed/__init__.py +32 -0
  111. torchrl/collectors/distributed/default_configs.py +133 -0
  112. torchrl/collectors/distributed/generic.py +1306 -0
  113. torchrl/collectors/distributed/ray.py +1092 -0
  114. torchrl/collectors/distributed/rpc.py +1006 -0
  115. torchrl/collectors/distributed/sync.py +731 -0
  116. torchrl/collectors/distributed/utils.py +160 -0
  117. torchrl/collectors/llm/__init__.py +10 -0
  118. torchrl/collectors/llm/base.py +494 -0
  119. torchrl/collectors/llm/ray_collector.py +275 -0
  120. torchrl/collectors/llm/utils.py +36 -0
  121. torchrl/collectors/llm/weight_update/__init__.py +10 -0
  122. torchrl/collectors/llm/weight_update/vllm.py +348 -0
  123. torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
  124. torchrl/collectors/utils.py +433 -0
  125. torchrl/collectors/weight_update.py +591 -0
  126. torchrl/csrc/numpy_utils.h +38 -0
  127. torchrl/csrc/pybind.cpp +27 -0
  128. torchrl/csrc/segment_tree.h +458 -0
  129. torchrl/csrc/torch_utils.h +34 -0
  130. torchrl/csrc/utils.cpp +48 -0
  131. torchrl/csrc/utils.h +31 -0
  132. torchrl/data/__init__.py +187 -0
  133. torchrl/data/datasets/__init__.py +58 -0
  134. torchrl/data/datasets/atari_dqn.py +878 -0
  135. torchrl/data/datasets/common.py +281 -0
  136. torchrl/data/datasets/d4rl.py +489 -0
  137. torchrl/data/datasets/d4rl_infos.py +187 -0
  138. torchrl/data/datasets/gen_dgrl.py +375 -0
  139. torchrl/data/datasets/minari_data.py +643 -0
  140. torchrl/data/datasets/openml.py +177 -0
  141. torchrl/data/datasets/openx.py +798 -0
  142. torchrl/data/datasets/roboset.py +363 -0
  143. torchrl/data/datasets/utils.py +11 -0
  144. torchrl/data/datasets/vd4rl.py +432 -0
  145. torchrl/data/llm/__init__.py +34 -0
  146. torchrl/data/llm/dataset.py +491 -0
  147. torchrl/data/llm/history.py +1378 -0
  148. torchrl/data/llm/prompt.py +198 -0
  149. torchrl/data/llm/reward.py +225 -0
  150. torchrl/data/llm/topk.py +186 -0
  151. torchrl/data/llm/utils.py +543 -0
  152. torchrl/data/map/__init__.py +21 -0
  153. torchrl/data/map/hash.py +185 -0
  154. torchrl/data/map/query.py +204 -0
  155. torchrl/data/map/tdstorage.py +363 -0
  156. torchrl/data/map/tree.py +1434 -0
  157. torchrl/data/map/utils.py +103 -0
  158. torchrl/data/postprocs/__init__.py +8 -0
  159. torchrl/data/postprocs/postprocs.py +391 -0
  160. torchrl/data/replay_buffers/__init__.py +99 -0
  161. torchrl/data/replay_buffers/checkpointers.py +622 -0
  162. torchrl/data/replay_buffers/ray_buffer.py +292 -0
  163. torchrl/data/replay_buffers/replay_buffers.py +2376 -0
  164. torchrl/data/replay_buffers/samplers.py +2578 -0
  165. torchrl/data/replay_buffers/scheduler.py +265 -0
  166. torchrl/data/replay_buffers/storages.py +2412 -0
  167. torchrl/data/replay_buffers/utils.py +1042 -0
  168. torchrl/data/replay_buffers/writers.py +781 -0
  169. torchrl/data/tensor_specs.py +7101 -0
  170. torchrl/data/utils.py +334 -0
  171. torchrl/envs/__init__.py +265 -0
  172. torchrl/envs/async_envs.py +1105 -0
  173. torchrl/envs/batched_envs.py +3093 -0
  174. torchrl/envs/common.py +4241 -0
  175. torchrl/envs/custom/__init__.py +11 -0
  176. torchrl/envs/custom/chess.py +617 -0
  177. torchrl/envs/custom/llm.py +214 -0
  178. torchrl/envs/custom/pendulum.py +401 -0
  179. torchrl/envs/custom/san_moves.txt +29274 -0
  180. torchrl/envs/custom/tictactoeenv.py +288 -0
  181. torchrl/envs/env_creator.py +263 -0
  182. torchrl/envs/gym_like.py +752 -0
  183. torchrl/envs/libs/__init__.py +68 -0
  184. torchrl/envs/libs/_gym_utils.py +326 -0
  185. torchrl/envs/libs/brax.py +846 -0
  186. torchrl/envs/libs/dm_control.py +544 -0
  187. torchrl/envs/libs/envpool.py +447 -0
  188. torchrl/envs/libs/gym.py +2239 -0
  189. torchrl/envs/libs/habitat.py +138 -0
  190. torchrl/envs/libs/isaac_lab.py +87 -0
  191. torchrl/envs/libs/isaacgym.py +203 -0
  192. torchrl/envs/libs/jax_utils.py +166 -0
  193. torchrl/envs/libs/jumanji.py +963 -0
  194. torchrl/envs/libs/meltingpot.py +599 -0
  195. torchrl/envs/libs/openml.py +153 -0
  196. torchrl/envs/libs/openspiel.py +652 -0
  197. torchrl/envs/libs/pettingzoo.py +1042 -0
  198. torchrl/envs/libs/procgen.py +351 -0
  199. torchrl/envs/libs/robohive.py +429 -0
  200. torchrl/envs/libs/smacv2.py +645 -0
  201. torchrl/envs/libs/unity_mlagents.py +891 -0
  202. torchrl/envs/libs/utils.py +147 -0
  203. torchrl/envs/libs/vmas.py +813 -0
  204. torchrl/envs/llm/__init__.py +63 -0
  205. torchrl/envs/llm/chat.py +730 -0
  206. torchrl/envs/llm/datasets/README.md +4 -0
  207. torchrl/envs/llm/datasets/__init__.py +17 -0
  208. torchrl/envs/llm/datasets/gsm8k.py +353 -0
  209. torchrl/envs/llm/datasets/ifeval.py +274 -0
  210. torchrl/envs/llm/envs.py +789 -0
  211. torchrl/envs/llm/libs/README.md +3 -0
  212. torchrl/envs/llm/libs/__init__.py +8 -0
  213. torchrl/envs/llm/libs/mlgym.py +869 -0
  214. torchrl/envs/llm/reward/__init__.py +10 -0
  215. torchrl/envs/llm/reward/gsm8k.py +324 -0
  216. torchrl/envs/llm/reward/ifeval/README.md +13 -0
  217. torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
  218. torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
  219. torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
  220. torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
  221. torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
  222. torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
  223. torchrl/envs/llm/transforms/__init__.py +55 -0
  224. torchrl/envs/llm/transforms/browser.py +292 -0
  225. torchrl/envs/llm/transforms/dataloading.py +859 -0
  226. torchrl/envs/llm/transforms/format.py +73 -0
  227. torchrl/envs/llm/transforms/kl.py +1544 -0
  228. torchrl/envs/llm/transforms/policy_version.py +189 -0
  229. torchrl/envs/llm/transforms/reason.py +323 -0
  230. torchrl/envs/llm/transforms/tokenizer.py +321 -0
  231. torchrl/envs/llm/transforms/tools.py +1955 -0
  232. torchrl/envs/model_based/__init__.py +9 -0
  233. torchrl/envs/model_based/common.py +180 -0
  234. torchrl/envs/model_based/dreamer.py +112 -0
  235. torchrl/envs/transforms/__init__.py +147 -0
  236. torchrl/envs/transforms/functional.py +48 -0
  237. torchrl/envs/transforms/gym_transforms.py +203 -0
  238. torchrl/envs/transforms/module.py +341 -0
  239. torchrl/envs/transforms/r3m.py +372 -0
  240. torchrl/envs/transforms/ray_service.py +663 -0
  241. torchrl/envs/transforms/rb_transforms.py +214 -0
  242. torchrl/envs/transforms/transforms.py +11835 -0
  243. torchrl/envs/transforms/utils.py +94 -0
  244. torchrl/envs/transforms/vc1.py +307 -0
  245. torchrl/envs/transforms/vecnorm.py +845 -0
  246. torchrl/envs/transforms/vip.py +407 -0
  247. torchrl/envs/utils.py +1718 -0
  248. torchrl/envs/vec_envs.py +11 -0
  249. torchrl/modules/__init__.py +206 -0
  250. torchrl/modules/distributions/__init__.py +73 -0
  251. torchrl/modules/distributions/continuous.py +830 -0
  252. torchrl/modules/distributions/discrete.py +908 -0
  253. torchrl/modules/distributions/truncated_normal.py +187 -0
  254. torchrl/modules/distributions/utils.py +233 -0
  255. torchrl/modules/llm/__init__.py +62 -0
  256. torchrl/modules/llm/backends/__init__.py +65 -0
  257. torchrl/modules/llm/backends/vllm/__init__.py +94 -0
  258. torchrl/modules/llm/backends/vllm/_models.py +46 -0
  259. torchrl/modules/llm/backends/vllm/base.py +72 -0
  260. torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
  261. torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
  262. torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
  263. torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
  264. torchrl/modules/llm/policies/__init__.py +28 -0
  265. torchrl/modules/llm/policies/common.py +1809 -0
  266. torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
  267. torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
  268. torchrl/modules/llm/utils.py +23 -0
  269. torchrl/modules/mcts/__init__.py +21 -0
  270. torchrl/modules/mcts/scores.py +579 -0
  271. torchrl/modules/models/__init__.py +86 -0
  272. torchrl/modules/models/batchrenorm.py +119 -0
  273. torchrl/modules/models/decision_transformer.py +179 -0
  274. torchrl/modules/models/exploration.py +731 -0
  275. torchrl/modules/models/llm.py +156 -0
  276. torchrl/modules/models/model_based.py +596 -0
  277. torchrl/modules/models/models.py +1712 -0
  278. torchrl/modules/models/multiagent.py +1067 -0
  279. torchrl/modules/models/recipes/impala.py +185 -0
  280. torchrl/modules/models/utils.py +162 -0
  281. torchrl/modules/planners/__init__.py +10 -0
  282. torchrl/modules/planners/cem.py +228 -0
  283. torchrl/modules/planners/common.py +73 -0
  284. torchrl/modules/planners/mppi.py +265 -0
  285. torchrl/modules/tensordict_module/__init__.py +89 -0
  286. torchrl/modules/tensordict_module/actors.py +2457 -0
  287. torchrl/modules/tensordict_module/common.py +529 -0
  288. torchrl/modules/tensordict_module/exploration.py +814 -0
  289. torchrl/modules/tensordict_module/probabilistic.py +321 -0
  290. torchrl/modules/tensordict_module/rnn.py +1639 -0
  291. torchrl/modules/tensordict_module/sequence.py +132 -0
  292. torchrl/modules/tensordict_module/world_models.py +34 -0
  293. torchrl/modules/utils/__init__.py +38 -0
  294. torchrl/modules/utils/mappings.py +9 -0
  295. torchrl/modules/utils/utils.py +89 -0
  296. torchrl/objectives/__init__.py +78 -0
  297. torchrl/objectives/a2c.py +659 -0
  298. torchrl/objectives/common.py +753 -0
  299. torchrl/objectives/cql.py +1346 -0
  300. torchrl/objectives/crossq.py +710 -0
  301. torchrl/objectives/ddpg.py +453 -0
  302. torchrl/objectives/decision_transformer.py +371 -0
  303. torchrl/objectives/deprecated.py +516 -0
  304. torchrl/objectives/dqn.py +683 -0
  305. torchrl/objectives/dreamer.py +488 -0
  306. torchrl/objectives/functional.py +48 -0
  307. torchrl/objectives/gail.py +258 -0
  308. torchrl/objectives/iql.py +996 -0
  309. torchrl/objectives/llm/__init__.py +30 -0
  310. torchrl/objectives/llm/grpo.py +846 -0
  311. torchrl/objectives/llm/sft.py +482 -0
  312. torchrl/objectives/multiagent/__init__.py +8 -0
  313. torchrl/objectives/multiagent/qmixer.py +396 -0
  314. torchrl/objectives/ppo.py +1669 -0
  315. torchrl/objectives/redq.py +683 -0
  316. torchrl/objectives/reinforce.py +530 -0
  317. torchrl/objectives/sac.py +1580 -0
  318. torchrl/objectives/td3.py +570 -0
  319. torchrl/objectives/td3_bc.py +625 -0
  320. torchrl/objectives/utils.py +782 -0
  321. torchrl/objectives/value/__init__.py +28 -0
  322. torchrl/objectives/value/advantages.py +1956 -0
  323. torchrl/objectives/value/functional.py +1459 -0
  324. torchrl/objectives/value/utils.py +360 -0
  325. torchrl/record/__init__.py +17 -0
  326. torchrl/record/loggers/__init__.py +23 -0
  327. torchrl/record/loggers/common.py +48 -0
  328. torchrl/record/loggers/csv.py +226 -0
  329. torchrl/record/loggers/mlflow.py +142 -0
  330. torchrl/record/loggers/tensorboard.py +139 -0
  331. torchrl/record/loggers/trackio.py +163 -0
  332. torchrl/record/loggers/utils.py +78 -0
  333. torchrl/record/loggers/wandb.py +214 -0
  334. torchrl/record/recorder.py +554 -0
  335. torchrl/services/__init__.py +79 -0
  336. torchrl/services/base.py +109 -0
  337. torchrl/services/ray_service.py +453 -0
  338. torchrl/testing/__init__.py +107 -0
  339. torchrl/testing/assertions.py +179 -0
  340. torchrl/testing/dist_utils.py +122 -0
  341. torchrl/testing/env_creators.py +227 -0
  342. torchrl/testing/env_helper.py +35 -0
  343. torchrl/testing/gym_helpers.py +156 -0
  344. torchrl/testing/llm_mocks.py +119 -0
  345. torchrl/testing/mocking_classes.py +2720 -0
  346. torchrl/testing/modules.py +295 -0
  347. torchrl/testing/mp_helpers.py +15 -0
  348. torchrl/testing/ray_helpers.py +293 -0
  349. torchrl/testing/utils.py +190 -0
  350. torchrl/trainers/__init__.py +42 -0
  351. torchrl/trainers/algorithms/__init__.py +11 -0
  352. torchrl/trainers/algorithms/configs/__init__.py +705 -0
  353. torchrl/trainers/algorithms/configs/collectors.py +216 -0
  354. torchrl/trainers/algorithms/configs/common.py +41 -0
  355. torchrl/trainers/algorithms/configs/data.py +308 -0
  356. torchrl/trainers/algorithms/configs/envs.py +104 -0
  357. torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
  358. torchrl/trainers/algorithms/configs/logging.py +80 -0
  359. torchrl/trainers/algorithms/configs/modules.py +570 -0
  360. torchrl/trainers/algorithms/configs/objectives.py +177 -0
  361. torchrl/trainers/algorithms/configs/trainers.py +340 -0
  362. torchrl/trainers/algorithms/configs/transforms.py +955 -0
  363. torchrl/trainers/algorithms/configs/utils.py +252 -0
  364. torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
  365. torchrl/trainers/algorithms/configs/weight_update.py +159 -0
  366. torchrl/trainers/algorithms/ppo.py +373 -0
  367. torchrl/trainers/algorithms/sac.py +308 -0
  368. torchrl/trainers/helpers/__init__.py +40 -0
  369. torchrl/trainers/helpers/collectors.py +416 -0
  370. torchrl/trainers/helpers/envs.py +573 -0
  371. torchrl/trainers/helpers/logger.py +33 -0
  372. torchrl/trainers/helpers/losses.py +132 -0
  373. torchrl/trainers/helpers/models.py +658 -0
  374. torchrl/trainers/helpers/replay_buffer.py +59 -0
  375. torchrl/trainers/helpers/trainers.py +301 -0
  376. torchrl/trainers/trainers.py +2052 -0
  377. torchrl/weight_update/__init__.py +33 -0
  378. torchrl/weight_update/_distributed.py +749 -0
  379. torchrl/weight_update/_mp.py +624 -0
  380. torchrl/weight_update/_noupdate.py +102 -0
  381. torchrl/weight_update/_ray.py +1032 -0
  382. torchrl/weight_update/_rpc.py +284 -0
  383. torchrl/weight_update/_shared.py +891 -0
  384. torchrl/weight_update/llm/__init__.py +32 -0
  385. torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
  386. torchrl/weight_update/llm/vllm_nccl.py +710 -0
  387. torchrl/weight_update/utils.py +73 -0
  388. torchrl/weight_update/weight_sync_schemes.py +1244 -0
  389. torchrl-0.11.0.dist-info/METADATA +1308 -0
  390. torchrl-0.11.0.dist-info/RECORD +394 -0
  391. torchrl-0.11.0.dist-info/WHEEL +5 -0
  392. torchrl-0.11.0.dist-info/entry_points.txt +2 -0
  393. torchrl-0.11.0.dist-info/licenses/LICENSE +21 -0
  394. torchrl-0.11.0.dist-info/top_level.txt +7 -0
torchrl/envs/utils.py ADDED
@@ -0,0 +1,1718 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations

import abc
import contextlib
import functools

import importlib.util
import inspect
import os
import re
import warnings
from enum import Enum
from typing import Any, Literal

import torch

from tensordict import (
    is_tensor_collection,
    LazyStackedTensorDict,
    NonTensorData,
    NonTensorStack,
    TensorDict,
    TensorDictBase,
    unravel_key,
)
from tensordict.base import _default_is_leaf, _is_leaf_nontensor
from tensordict.nn import TensorDictModule, TensorDictModuleBase
from tensordict.nn.probabilistic import (  # noqa
    interaction_type as exploration_type,
    InteractionType as ExplorationType,
    set_interaction_type as set_exploration_type,
)
from tensordict.utils import is_non_tensor, NestedKey
from torch import nn as nn
from torch.utils._pytree import tree_map
from torchrl._utils import _replace_last, _rng_decorator, logger as torchrl_logger

from torchrl.data.tensor_specs import (
    Composite,
    NO_DEFAULT_RL as NO_DEFAULT,
    NonTensor,
    TensorSpec,
    Unbounded,
)
from torchrl.data.utils import check_no_exclusive_keys, CloudpickleWrapper
from torchrl.modules.tensordict_module.exploration import RandomPolicy  # noqa

__all__ = [
    "exploration_type",
    "set_exploration_type",
    "ExplorationType",
    "check_env_specs",
    "step_mdp",
    "make_composite_from_td",
    "MarlGroupMapType",
    "check_marl_grouping",
]

ACTION_MASK_ERROR = RuntimeError(
    "An out-of-bounds action has been provided to an env with an 'action_mask' output. "
    "If you are using a custom policy, make sure to take the action mask into account when computing the output. "
    "If you are using a default policy, please add the torchrl.envs.transforms.ActionMask transform to your environment. "
    "If you are using a ParallelEnv or another batched environment, "
    "make sure to add the transform to the ParallelEnv (and not to the sub-environments). "
    "For more info on using action masks, see the docs at: "
    "https://pytorch.org/rl/main/reference/envs.html#environments-with-masked-actions"
)


class _classproperty(property):
    def __get__(self, cls, owner):
        return classmethod(self.fget).__get__(None, owner)()


class _StepMDP:
    """Stateful version of :func:`~torchrl.envs.step_mdp`.

    Precomputes the list of keys to include and exclude during a call to step_mdp
    to reduce runtime.

    """

    def __init__(
        self,
        env,
        *,
        keep_other: bool = True,
        exclude_reward: bool = True,
        exclude_done: bool = False,
        exclude_action: bool = True,
    ):
        action_keys = env._action_keys_step_mdp
        done_keys = env._done_keys_step_mdp
        reward_keys = env._reward_keys_step_mdp
        observation_keys = env._observation_keys_step_mdp
        state_keys = env._state_keys_step_mdp
        self.action_keys = [unravel_key(key) for key in action_keys]
        self.done_keys = [unravel_key(key) for key in done_keys]
        self.observation_keys = list(observation_keys)
        self.state_keys = list(state_keys)
        self.reward_keys = [unravel_key(key) for key in reward_keys]
        self.reward_keys_filt = list(set(self.reward_keys) - set(self.state_keys))

        excluded = set()
        if exclude_reward:
            # If a reward is also a state, it must be in the input
            excluded = excluded.union(self.reward_keys_filt)
        if exclude_done:
            excluded = excluded.union(self.done_keys)
        if exclude_action:
            excluded = excluded.union(self.action_keys)

        self.excluded = [unravel_key(key) for key in excluded]

        self.keep_other = keep_other
        self.exclude_action = exclude_action

        self.exclude_from_root = ["next", *self.done_keys]
        self.keys_from_next = list(self.observation_keys)
        if not exclude_reward:
            self.keys_from_next += self.reward_keys
        else:
            self.keys_from_next += [
                reward_key
                for reward_key in self.reward_keys
                if reward_key in self.state_keys
            ]
        if not exclude_done:
            self.keys_from_next += self.done_keys
        self.keys_from_root = []
        if not exclude_action:
            self.keys_from_root += self.action_keys
        else:
            self.exclude_from_root += self.action_keys
        if keep_other:
            self.keys_from_root += self.state_keys
        else:
            self.exclude_from_root += self.state_keys

        reset_keys = {_replace_last(key, "_reset") for key in self.done_keys}
        self.exclude_from_root += list(reset_keys)
        self.exclude_from_root += self.reward_keys_filt

        self.exclude_from_root = self._repr_key_list_as_tree(self.exclude_from_root)
        self.keys_from_root = self._repr_key_list_as_tree(self.keys_from_root)
        self.keys_from_next = self._repr_key_list_as_tree(self.keys_from_next)
        self.validated = True

        # Model based envs can have missing keys
        # TODO: do we want to always allow this? check_env_specs should catch these or downstream ops
        self._allow_absent_keys = True

    def validate(self, tensordict):
        # Deprecated - leaving dormant
        if self.validated:
            return True
        if self.validated is None:
            # check that the key set of the tensordict matches what is expected
            expected = (
                self.state_keys
                + self.action_keys
                + self.done_keys
                + self.observation_keys
                + [unravel_key(("next", key)) for key in self.observation_keys]
                + [unravel_key(("next", key)) for key in self.done_keys]
                + [unravel_key(("next", key)) for key in self.reward_keys]
            )

            def _is_reset(key: NestedKey):
                if isinstance(key, str):
                    return key == "_reset"
                return key[-1] == "_reset"

            actual = {
                key
                for key in tensordict.keys(True, True, is_leaf=_is_leaf_nontensor)
                if not _is_reset(key)
            }
            expected = set(expected)
            # Actual (the input td) can have more keys, like loc and scale etc
            # But we cannot have keys missing: if there's a key in expected that is not in actual
            # it is a problem.
            self.validated = expected.intersection(actual) == expected
            if not self.validated:
                warnings.warn(
                    "The expected key set and actual key set differ (all expected keys must be present, "
                    "extra keys can be present in the input TensorDict). "
                    "As a result, step_mdp will need to run extra key checks at each iteration. "
                    f"{{Expected keys}}-{{Actual keys}}={set(expected) - actual} (<= this set should be empty), \n"
                    f"{{Actual keys}}-{{Expected keys}}={actual - set(expected)}."
                )
        return self.validated

    @staticmethod
    def _repr_key_list_as_tree(key_list):
        """Represents the keys as a tree to facilitate iteration."""
        if not key_list:
            return {}
        key_dict = {key: torch.zeros((0,)) for key in key_list}
        td = TensorDict(key_dict, batch_size=torch.Size([]))
        return tree_map(lambda x: None, td.to_dict())

    @classmethod
    def _grab_and_place(
        cls,
        nested_key_dict: dict,
        data_in: TensorDictBase,
        data_out: TensorDictBase,
        _allow_absent_keys: bool,
    ):
        for key, subdict in nested_key_dict.items():
            val = data_in._get_str(key, NO_DEFAULT)
            if subdict is not None:
                val_out = data_out._get_str(key, None)
                if val_out is None or val_out.batch_size != val.batch_size:
                    val_out = val.empty(batch_size=val.batch_size)
                if isinstance(val, LazyStackedTensorDict):
                    val = LazyStackedTensorDict.lazy_stack(
                        [
                            cls._grab_and_place(
                                subdict,
                                _val,
                                _val_out,
                                _allow_absent_keys=_allow_absent_keys,
                            )
                            for (_val, _val_out) in zip(
                                val.unbind(val.stack_dim),
                                val_out.unbind(val_out.stack_dim),
                            )
                        ],
                        dim=val.stack_dim,
                    )
                else:
                    val = cls._grab_and_place(
                        subdict, val, val_out, _allow_absent_keys=_allow_absent_keys
                    )
            if val is NO_DEFAULT:
                if not _allow_absent_keys:
                    raise KeyError(f"key {key} not found.")
            else:
                if is_non_tensor(val):
                    val = val.clone()
                if is_tensor_collection(val):
                    val = val.copy()
                data_out._set_str(
                    key, val, validated=True, inplace=False, non_blocking=False
                )
        return data_out

    @classmethod
    def _exclude(
        cls, nested_key_dict: dict, data_in: TensorDictBase, out: TensorDictBase | None
    ) -> None:
        """Copies the entries if they're not part of the list of keys to exclude."""
        if isinstance(data_in, LazyStackedTensorDict):
            if out is None:
                out = data_in.empty()
            for td, td_out in zip(data_in.tensordicts, out.tensordicts):
                cls._exclude(nested_key_dict, td, td_out)
            return out
        has_set = False
        for key, value in data_in.items(is_leaf=_is_leaf_nontensor):
            subdict = nested_key_dict.get(key, NO_DEFAULT)
            if subdict is NO_DEFAULT:
                value = value.copy() if is_tensor_collection(value) else value
                if not has_set and out is None:
                    out = data_in.empty()
                out._set_str(key, value, validated=True, inplace=False)
                has_set = True
            elif subdict is not None:
                value = cls._exclude(subdict, value, None)
                if value is not None:
                    if not has_set and out is None:
                        out = data_in.empty()
                    out._set_str(key, value, validated=True, inplace=False)
                    has_set = True
        if has_set:
            return out

    def __call__(self, tensordict):
        if isinstance(tensordict, LazyStackedTensorDict):
            out = LazyStackedTensorDict.lazy_stack(
                [self.__call__(td) for td in tensordict.tensordicts],
                tensordict.stack_dim,
            )
            return out
        next_td = tensordict._get_str("next", None)
        if self.keep_other:
            out = self._exclude(self.exclude_from_root, tensordict, out=None)
            if out is None:
                out = tensordict.empty()
        else:
            out = next_td.empty()
            self._grab_and_place(
                self.keys_from_root,
                tensordict,
                out,
                _allow_absent_keys=self._allow_absent_keys,
            )
        if isinstance(next_td, LazyStackedTensorDict):
            if not isinstance(out, LazyStackedTensorDict):
                out = LazyStackedTensorDict.lazy_stack(
                    list(out.unbind(next_td.stack_dim)), dim=next_td.stack_dim
                )
            for _next_td, _out in zip(next_td.tensordicts, out.tensordicts):
                self._grab_and_place(
                    self.keys_from_next,
                    _next_td,
                    _out,
                    _allow_absent_keys=self._allow_absent_keys,
                )
        else:
            self._grab_and_place(
                self.keys_from_next,
                next_td,
                out,
                _allow_absent_keys=self._allow_absent_keys,
            )
        return out


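# A minimal usage sketch for ``_StepMDP`` (illustrative, not in the original file;
# assumes ``env`` is any ``EnvBase`` instance and ``td`` the tensordict returned
# by ``env.step(...)``):
#
#     step = _StepMDP(env, exclude_action=True)  # key lists are precomputed once
#     root_td = step(td)  # same result as step_mdp(td), amortized across calls

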
def step_mdp(
    tensordict: TensorDictBase,
    next_tensordict: TensorDictBase = None,
    keep_other: bool = True,
    exclude_reward: bool = True,
    exclude_done: bool = False,
    exclude_action: bool = True,
    reward_keys: NestedKey | list[NestedKey] = "reward",
    done_keys: NestedKey | list[NestedKey] = "done",
    action_keys: NestedKey | list[NestedKey] = "action",
) -> TensorDictBase:
    """Creates a new tensordict that reflects a step in time of the input tensordict.

    Given a tensordict retrieved after a step, returns the :obj:`"next"` indexed-tensordict.
    The arguments allow for precise control over what should be kept and what
    should be copied from the ``"next"`` entry. The default behavior is:
    move the observation entries and done states to the root, exclude
    the current action and the reward, and keep all extra keys (non-action,
    non-done, non-reward).

    Args:
        tensordict (TensorDictBase): The tensordict with keys to be renamed.
        next_tensordict (TensorDictBase, optional): The destination tensordict. If `None`, a new tensordict is created.
        keep_other (bool, optional): If ``True``, all keys that are not nested under
            :obj:`"next"` will be kept. Default is ``True``.
        exclude_reward (bool, optional): If ``True``, the :obj:`"reward"` key will be discarded
            from the resulting tensordict. If ``False``, it will be copied (and replaced)
            from the ``"next"`` entry (if present). Default is ``True``.
        exclude_done (bool, optional): If ``True``, the :obj:`"done"` key will be discarded
            from the resulting tensordict. If ``False``, it will be copied (and replaced)
            from the ``"next"`` entry (if present). Default is ``False``.
        exclude_action (bool, optional): If ``True``, the :obj:`"action"` key will
            be discarded from the resulting tensordict. If ``False``, it will
            be kept in the root tensordict (since it should not be present in
            the ``"next"`` entry). Default is ``True``.
        reward_keys (NestedKey or list of NestedKey, optional): The keys where the reward is written. Defaults
            to "reward".
        done_keys (NestedKey or list of NestedKey, optional): The keys where the done is written. Defaults
            to "done".
        action_keys (NestedKey or list of NestedKey, optional): The keys where the action is written. Defaults
            to "action".

    Returns:
        TensorDictBase: A new tensordict (or `next_tensordict` if provided) containing the tensors of the t+1 step.

    .. seealso:: :meth:`EnvBase.step_mdp` is the class-based version of this free function. It will attempt to cache the
        key values to reduce the overhead of making a step in the MDP.

    Examples:
        >>> from tensordict import TensorDict
        >>> import torch
        >>> td = TensorDict({
        ...     "done": torch.zeros((), dtype=torch.bool),
        ...     "reward": torch.zeros(()),
        ...     "extra": torch.zeros(()),
        ...     "next": TensorDict({
        ...         "done": torch.zeros((), dtype=torch.bool),
        ...         "reward": torch.zeros(()),
        ...         "obs": torch.zeros(()),
        ...     }, []),
        ...     "obs": torch.zeros(()),
        ...     "action": torch.zeros(()),
        ... }, [])
        >>> print(step_mdp(td))
        TensorDict(
            fields={
                done: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.bool, is_shared=False),
                extra: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                obs: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
            batch_size=torch.Size([]),
            device=None,
            is_shared=False)
        >>> print(step_mdp(td, exclude_done=True))  # "done" is dropped
        TensorDict(
            fields={
                extra: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                obs: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
            batch_size=torch.Size([]),
            device=None,
            is_shared=False)
        >>> print(step_mdp(td, exclude_reward=False))  # "reward" is kept
        TensorDict(
            fields={
                done: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.bool, is_shared=False),
                extra: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                obs: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                reward: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
            batch_size=torch.Size([]),
            device=None,
            is_shared=False)
        >>> print(step_mdp(td, exclude_action=False))  # "action" persists at the root
        TensorDict(
            fields={
                action: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                done: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.bool, is_shared=False),
                extra: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
                obs: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
            batch_size=torch.Size([]),
            device=None,
            is_shared=False)
        >>> print(step_mdp(td, keep_other=False))  # "extra" is missing
        TensorDict(
            fields={
                done: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.bool, is_shared=False),
                obs: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
            batch_size=torch.Size([]),
            device=None,
            is_shared=False)

    .. warning:: This function will not work properly if the reward key is also part of the input keys when
        the reward keys are excluded. This is why the :class:`~torchrl.envs.RewardSum` transform registers
        the episode reward in the observation and not the reward spec by default.
        When using the fast, cached version of this function (``_StepMDP``), this issue should not
        be observed.

    """
    if isinstance(tensordict, LazyStackedTensorDict):
        if next_tensordict is not None:
            next_tensordicts = next_tensordict.unbind(tensordict.stack_dim)
        else:
            next_tensordicts = [None] * len(tensordict.tensordicts)
        out = LazyStackedTensorDict.lazy_stack(
            [
                step_mdp(
                    td,
                    next_tensordict=ntd,
                    keep_other=keep_other,
                    exclude_reward=exclude_reward,
                    exclude_done=exclude_done,
                    exclude_action=exclude_action,
                    reward_keys=reward_keys,
                    done_keys=done_keys,
                    action_keys=action_keys,
                )
                for td, ntd in zip(tensordict.tensordicts, next_tensordicts)
            ],
            tensordict.stack_dim,
        )
        if next_tensordict is not None:
            next_tensordict.update(out)
            return next_tensordict
        return out

    if not isinstance(action_keys, list):
        action_keys = [action_keys]
    if not isinstance(done_keys, list):
        done_keys = [done_keys]
    if not isinstance(reward_keys, list):
        reward_keys = [reward_keys]

    excluded = set()
    if exclude_reward:
        excluded = excluded.union(reward_keys)
    if exclude_done:
        excluded = excluded.union(done_keys)
    if exclude_action:
        excluded = excluded.union(action_keys)
    next_td = tensordict.get("next")
    out = next_td.empty()

    total_key = ()
    if keep_other:
        for key in tensordict.keys():
            if key != "next":
                _set(tensordict, out, key, total_key, excluded)
    elif not exclude_action:
        for action_key in action_keys:
            _set_single_key(tensordict, out, action_key)
    for key in next_td.keys():
        _set(next_td, out, key, total_key, excluded)
    if next_tensordict is not None:
        return next_tensordict.update(out)
    else:
        return out


def _set_single_key(
    source: TensorDictBase,
    dest: TensorDictBase,
    key: str | tuple,
    clone: bool = False,
    device=None,
):
    # key should be already unraveled
    if isinstance(key, str):
        key = (key,)
    for k in key:
        # TODO: we can do better than try/except by leveraging the as_list / as_nested_tensor feature
        try:
            val = source._get_str(k, None)
            if is_tensor_collection(val):
                new_val = dest._get_str(k, None)
                if new_val is None:
                    new_val = val.empty()
                    dest._set_str(
                        k, new_val, inplace=False, validated=True, non_blocking=False
                    )
                source = val
                dest = new_val
            else:
                if device is not None and val.device != device:
                    val = val.to(device, non_blocking=True)
                elif clone:
                    val = val.clone()
                dest._set_str(k, val, inplace=False, validated=True, non_blocking=False)
        # This is a temporary solution to understand if a key is heterogeneous
        # while not having performance impact when the exception is not raised
        except RuntimeError as err:
            if re.match(r"Failed to stack tensors within a tensordict", str(err)):
                # this is a het key
                for s_td, d_td in zip(source.tensordicts, dest.tensordicts):
                    _set_single_key(s_td, d_td, k, clone=clone, device=device)
                break
            else:
                raise err


def _set(source, dest, key, total_key, excluded):
    total_key = total_key + (key,)
    non_empty = False
    if unravel_key(total_key) not in excluded:
        # TODO: we can do better than try/except by leveraging the as_list / as_nested_tensor feature
        try:
            val = source.get(key)
            if is_tensor_collection(val) and not isinstance(
                val, (NonTensorData, NonTensorStack)
            ):
                # if val is a tensordict we need to copy the structure
                new_val = dest.get(key, None)
                if new_val is None:
                    new_val = val.empty()
                non_empty_local = False
                for subkey in val.keys():
                    non_empty_local = (
                        _set(val, new_val, subkey, total_key, excluded)
                        or non_empty_local
                    )
                if non_empty_local:
                    # dest.set(key, new_val)
                    dest._set_str(
                        key, new_val, inplace=False, validated=True, non_blocking=False
                    )
                non_empty = non_empty_local
            else:
                non_empty = True
                # dest.set(key, val)
                dest._set_str(
                    key, val, inplace=False, validated=True, non_blocking=False
                )
        # This is a temporary solution to understand if a key is heterogeneous
        # while not having performance impact when the exception is not raised
        except RuntimeError as err:
            if re.match(r"Failed to stack tensors within a tensordict", str(err)):
                # this is a het key
                non_empty_local = False
                for s_td, d_td in zip(source.tensordicts, dest.tensordicts):
                    non_empty_local = (
                        _set(s_td, d_td, key, total_key, excluded) or non_empty_local
                    )
                non_empty = non_empty_local
            else:
                raise err

    return non_empty


def get_available_libraries():
    """Returns all the supported libraries."""
    return SUPPORTED_LIBRARIES


def _check_gym():
    """Returns True if the gym library is installed."""
    return importlib.util.find_spec("gym") is not None


def _check_gym_atari():
    """Returns True if the gym library is installed and atari envs can be found."""
    if not _check_gym():
        return False
    # the importable module name uses underscores, not dashes
    return importlib.util.find_spec("atari_py") is not None


def _check_mario():
    """Returns True if the "gym-super-mario-bros" library is installed."""
    # the importable module name uses underscores, not dashes
    return importlib.util.find_spec("gym_super_mario_bros") is not None


def _check_dmcontrol():
    """Returns True if the "dm-control" library is installed."""
    return importlib.util.find_spec("dm_control") is not None


def _check_dmlab():
    """Returns True if the "deepmind-lab" library is installed."""
    return importlib.util.find_spec("deepmind_lab") is not None


SUPPORTED_LIBRARIES = {
    "gym": _check_gym(),  # OpenAI
    "gym[atari]": _check_gym_atari(),
    "dm_control": _check_dmcontrol(),
    "habitat": None,
    "gym-super-mario-bros": _check_mario(),
    # "vizdoom": None,  # gym based, https://github.com/mwydmuch/ViZDoom
    # "openspiel": None,  # DM, https://github.com/deepmind/open_spiel
    # "pysc2": None,  # DM, https://github.com/deepmind/pysc2
    # "deepmind_lab": _check_dmlab(),
    # DM, https://github.com/deepmind/lab, https://github.com/deepmind/lab/tree/master/python/pip_package
    # "serpent.ai": None,  # https://github.com/SerpentAI/SerpentAI
    # "gfootball": None,  # 2.8k G, https://github.com/google-research/football
    # DM, https://github.com/deepmind/dm_control
    # FB, https://github.com/facebookresearch/habitat-sim
    # "meta-world": None,  # https://github.com/rlworkgroup/metaworld
    # "minerl": None,  # https://github.com/minerllabs/minerl
    # "multi-agent-emergence-environments": None,
    # OpenAI, https://github.com/openai/multi-agent-emergence-environments
    # "procgen": None,  # OpenAI, https://github.com/openai/procgen
    # "pybullet": None,  # https://github.com/benelot/pybullet-gym
    # "realworld_rl_suite": None,
    # G, https://github.com/google-research/realworldrl_suite
    # "rlcard": None,  # https://github.com/datamllab/rlcard
    # "screeps": None,  # https://github.com/screeps/screeps
    # "ml-agents": None,
}


def _per_level_env_check(data0, data1, check_dtype):
    """Checks shape and dtype of two tensordicts, accounting for lazy stacks."""
    if isinstance(data0, LazyStackedTensorDict):
        for _data0, _data1 in zip(data0.tensordicts, data1.unbind(data0.stack_dim)):
            _per_level_env_check(_data0, _data1, check_dtype=check_dtype)
        return
    if isinstance(data1, LazyStackedTensorDict):
        for _data0, _data1 in zip(data0.unbind(data1.stack_dim), data1.tensordicts):
            _per_level_env_check(_data0, _data1, check_dtype=check_dtype)
        return
    else:
        keys0 = set(data0.keys())
        keys1 = set(data1.keys())
        if keys0 != keys1:
            raise AssertionError(f"Keys mismatch: {keys0} vs {keys1}")
        for key in keys0:
            _data0 = data0[key]
            _data1 = data1[key]
            if _data0.shape != _data1.shape:
                raise AssertionError(
                    f"The shapes of the real and fake tensordict don't match for key {key}. "
                    f"Got fake={_data0.shape} and real={_data1.shape}."
                )
            if isinstance(_data0, TensorDictBase):
                _per_level_env_check(_data0, _data1, check_dtype=check_dtype)
            else:
                if check_dtype and (_data0.dtype != _data1.dtype):
                    raise AssertionError(
                        f"The dtypes of the real and fake tensordict don't match for key {key}. "
                        f"Got fake={_data0.dtype} and real={_data1.dtype}."
                    )


def check_env_specs(
    env: torchrl.envs.EnvBase,  # noqa
    return_contiguous: bool | None = None,
    check_dtype=True,
    seed: int | None = None,
    tensordict: TensorDictBase | None = None,
    break_when_any_done: bool | Literal["both"] | None = None,
):
    """Tests an environment's specs against the results of a short rollout.

    This test function should be used as a sanity check for an env wrapped with
    torchrl's EnvBase subclasses: any discrepancy between the expected data and
    the data collected should raise an assertion error.

    A broken environment spec will likely make it impossible to use parallel
    environments.

    Args:
        env (EnvBase): the env for which the specs have to be checked against data.
        return_contiguous (bool, optional): if ``True``, the random rollout will be called with
            return_contiguous=True. This will fail in some cases (e.g. heterogeneous shapes
            of inputs/outputs). Defaults to ``None`` (determined by the presence of dynamic specs).
        check_dtype (bool, optional): if False, dtype checks will be skipped.
            Defaults to `True`.
        seed (int, optional): for reproducibility, a seed can be set.
            The seed will be set in pytorch temporarily, then the RNG state will
            be reverted to what it was before. For the env, we set the seed, but since
            setting the rng state back to what it was isn't a feature of most environments,
            we leave it to the user to accomplish that.
            Defaults to ``None``.
        tensordict (TensorDict, optional): an optional tensordict instance to use for reset.
        break_when_any_done (bool or str, optional): value for ``break_when_any_done`` in :meth:`~torchrl.envs.EnvBase.rollout`.
            If ``"both"``, the test is run on both `True` and `False`.

    Caution: this function resets the env seed. It should be used "offline" to
    check that an env is adequately constructed, but it may affect the seeding
    of an experiment and as such should be kept out of training scripts.

    """
    if return_contiguous is None:
        return_contiguous = not env._has_dynamic_specs
    if break_when_any_done == "both":
        check_env_specs(
            env,
            return_contiguous=return_contiguous,
            check_dtype=check_dtype,
            seed=seed,
            tensordict=tensordict,
            break_when_any_done=True,
        )
        return check_env_specs(
            env,
            return_contiguous=return_contiguous,
            check_dtype=check_dtype,
            seed=seed,
            tensordict=tensordict,
            break_when_any_done=False,
        )
    if seed is not None:
        device = (
            env.device if env.device is not None and env.device.type == "cuda" else None
        )
        with _rng_decorator(seed, device=device):
            env.set_seed(seed)
            return check_env_specs(
                env, return_contiguous=return_contiguous, check_dtype=check_dtype
            )

    fake_tensordict = env.fake_tensordict()
    if not env.batch_locked and tensordict is not None:
        shape = torch.broadcast_shapes(fake_tensordict.shape, tensordict.shape)
        fake_tensordict = fake_tensordict.expand(shape)
        tensordict = tensordict.expand(shape)
    real_tensordict = env.rollout(
        3,
        return_contiguous=return_contiguous,
        tensordict=tensordict,
        auto_reset=tensordict is None,
        break_when_any_done=break_when_any_done,
    )

    if return_contiguous:
        fake_tensordict = fake_tensordict.unsqueeze(real_tensordict.batch_dims - 1)
        fake_tensordict = fake_tensordict.expand(*real_tensordict.shape)
    else:
        fake_tensordict = LazyStackedTensorDict.lazy_stack(
            [fake_tensordict.clone() for _ in range(3)], -1
        )
    # eliminate empty containers
    fake_tensordict_select = fake_tensordict.select(
        *fake_tensordict.keys(True, True, is_leaf=_default_is_leaf)
    )
    real_tensordict_select = real_tensordict.select(
        *real_tensordict.keys(True, True, is_leaf=_default_is_leaf)
    )
    # check keys
    fake_tensordict_keys = set(
        fake_tensordict.keys(True, True, is_leaf=_is_leaf_nontensor)
    )
    real_tensordict_keys = set(
        real_tensordict.keys(True, True, is_leaf=_is_leaf_nontensor)
    )
    if fake_tensordict_keys != real_tensordict_keys:
        keys_in_real_not_in_fake = real_tensordict_keys - fake_tensordict_keys
        keys_in_fake_not_in_real = fake_tensordict_keys - real_tensordict_keys
        raise AssertionError(
            f"""The keys of the specs and data do not match:
    - List of keys present in real but not in fake: {keys_in_real_not_in_fake=},
    - List of keys present in fake but not in real: {keys_in_fake_not_in_real=}.
"""
        )

    def zeroing_err_msg():
        return (
            "zeroing the two tensordicts did not make them identical. "
            f"Check for discrepancies:\nFake=\n{fake_tensordict}\nReal=\n{real_tensordict}"
        )

    from torchrl.envs.common import _has_dynamic_specs

    if _has_dynamic_specs(env.specs):
        for real, fake in zip(
            real_tensordict_select.filter_non_tensor_data().unbind(-1),
            fake_tensordict_select.filter_non_tensor_data().unbind(-1),
        ):

            def expand(name, x, y):
                try:
                    return x.expand_as(y)
                except Exception as e:
                    raise RuntimeError(
                        f"Failed to expand fake tensor {name} with shape {x.shape} to real shape {y.shape}"
                    ) from e

            fake = fake.apply(expand, real, named=True, nested_keys=True)
            if (torch.zeros_like(real) != torch.zeros_like(fake)).any():
                raise AssertionError(zeroing_err_msg())

            # Checks shapes and eventually dtypes of keys at all nesting levels
            _per_level_env_check(fake, real, check_dtype=check_dtype)

    else:
        if (
            torch.zeros_like(fake_tensordict_select)
            != torch.zeros_like(real_tensordict_select)
        ).any():
            raise AssertionError(zeroing_err_msg())

        # Checks shapes and eventually dtypes of keys at all nesting levels
        _per_level_env_check(
            fake_tensordict_select, real_tensordict_select, check_dtype=check_dtype
        )

    # Check specs
    last_td = real_tensordict[..., -1]
    last_td = env.rand_action(last_td)
    full_action_spec = env.input_spec["full_action_spec"]
    full_state_spec = env.input_spec["full_state_spec"]
    full_observation_spec = env.output_spec["full_observation_spec"]
    full_reward_spec = env.output_spec["full_reward_spec"]
    full_done_spec = env.output_spec["full_done_spec"]
    for name, spec in (
        ("action", full_action_spec),
        ("state", full_state_spec),
        ("done", full_done_spec),
        ("obs", full_observation_spec),
    ):
        if not check_no_exclusive_keys(spec):
            raise AssertionError(
                "It appears you are using some StackedComposite specs with exclusive keys "
                "(keys present in some but not all of the stacked specs). To use such heterogeneous specs, "
                "you will need to first pass your stack through `torchrl.data.consolidate_spec`."
            )
        if spec is None:
            spec = Composite(shape=env.batch_size, device=env.device)
        td = last_td.select(*spec.keys(True, True), strict=True)
        if not spec.contains(td):
            raise AssertionError(
                f"spec check failed at root for spec {name}={spec} and data {td}."
            )
    for name, spec in (
        ("reward", full_reward_spec),
        ("done", full_done_spec),
        ("obs", full_observation_spec),
    ):
        if spec is None:
            spec = Composite(shape=env.batch_size, device=env.device)
        td = last_td.get("next").select(*spec.keys(True, True), strict=True)
        if not spec.contains(td):
            raise AssertionError(
                f"spec check failed at root for spec {name}={spec} and data {td}."
            )

    torchrl_logger.info("check_env_specs succeeded!")


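# A minimal usage sketch for ``check_env_specs`` (illustrative, not in the
# original file; assumes the Gym backend and the "Pendulum-v1" task are available):
#
#     from torchrl.envs import GymEnv
#     env = GymEnv("Pendulum-v1")
#     check_env_specs(env, seed=0)  # raises AssertionError on any spec/data mismatch

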
def _selective_unsqueeze(tensor: torch.Tensor, batch_size: torch.Size, dim: int = -1):
    shape_len = len(tensor.shape)

    if shape_len < len(batch_size):
        raise RuntimeError(
            f"Tensor has fewer dims than batch_size. shape:{tensor.shape}, batch_size: {batch_size}"
        )
    if tensor.shape[: len(batch_size)] != batch_size:
        raise RuntimeError(
            f"Tensor does not have given batch_size. shape:{tensor.shape}, batch_size: {batch_size}"
        )

    if shape_len == len(batch_size):
        return tensor.unsqueeze(dim=dim)
    return tensor


def _sort_keys(element):
    if isinstance(element, tuple):
        element = unravel_key(element)
        return "_-|-_".join(element)
    return element


def make_composite_from_td(
    data, *, unsqueeze_null_shapes: bool = True, dynamic_shape: bool = False
):
    """Creates a Composite instance from a tensordict, assuming all values are unbounded.

    Args:
        data (tensordict.TensorDict): a tensordict to be mapped onto a Composite.

    Keyword Args:
        unsqueeze_null_shapes (bool, optional): if ``True``, every empty shape will be
            unsqueezed to (1,). Defaults to ``True``.
        dynamic_shape (bool, optional): if ``True``, all tensors will be assumed to have a dynamic shape
            along the last dimension. Defaults to ``False``.

    Examples:
        >>> from tensordict import TensorDict
        >>> data = TensorDict({
        ...     "obs": torch.randn(3),
        ...     "action": torch.zeros(2, dtype=torch.int),
        ...     "next": {"obs": torch.randn(3), "reward": torch.randn(1)}
        ... }, [])
        >>> spec = make_composite_from_td(data)
        >>> print(spec)
        Composite(
            obs: UnboundedContinuous(
                shape=torch.Size([3]), space=None, device=cpu, dtype=torch.float32, domain=continuous),
            action: UnboundedContinuous(
                shape=torch.Size([2]), space=None, device=cpu, dtype=torch.int32, domain=continuous),
            next: Composite(
                obs: UnboundedContinuous(
                    shape=torch.Size([3]), space=None, device=cpu, dtype=torch.float32, domain=continuous),
                reward: UnboundedContinuous(
                    shape=torch.Size([1]), space=ContinuousBox(low=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, contiguous=True), high=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, contiguous=True)), device=cpu, dtype=torch.float32, domain=continuous), device=cpu, shape=torch.Size([])), device=cpu, shape=torch.Size([]))
        >>> assert (spec.zero() == data.zero_()).all()
    """

    # custom function to convert a tensordict in a similar spec structure
    # of unbounded values.
    def make_shape(shape):
        if shape or not unsqueeze_null_shapes:
            if dynamic_shape and shape:
                return shape[:-1] + (-1,)
            else:
                return shape
        return torch.Size([1])

    composite = Composite(
        {
            key: make_composite_from_td(
                tensor,
                unsqueeze_null_shapes=unsqueeze_null_shapes,
                dynamic_shape=dynamic_shape,
            )
            if is_tensor_collection(tensor) and not is_non_tensor(tensor)
            else NonTensor(
                shape=tensor.shape,
                # Assume all the non-tensors have the same datatype
                example_data=tensor.view(-1)[0].data,
                device=tensor.device,
                feature_dims=len(tensor.shape) - len(data.shape),
            )
            if is_non_tensor(tensor)
            else Unbounded(
                dtype=tensor.dtype, device=tensor.device, shape=make_shape(tensor.shape)
            )
            for key, tensor in data.items()
        },
        shape=data.shape,
        data_cls=type(data),
    )
    return composite


978
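+ # Illustrative sketch (hypothetical helper, not part of the released file) of
+ # the ``dynamic_shape`` flag documented above: the last dim of each leaf spec
+ # is marked as dynamic (-1). The exact spec repr is an assumption.
+ def _example_make_composite_dynamic_shape():
+     from tensordict import TensorDict
+ 
+     data = TensorDict({"tokens": torch.zeros(5, dtype=torch.int64)}, [])
+     spec = make_composite_from_td(data, dynamic_shape=True)
+     # the trailing dimension is free to vary from step to step
+     assert spec["tokens"].shape == torch.Size([-1])
+ 
+ 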
+ @contextlib.contextmanager
+ def clear_mpi_env_vars():
+     """Clears MPI environment variables.
+ 
+     `from mpi4py import MPI` will call `MPI_Init` by default.
+     If the child process has MPI environment variables, MPI will think that the child process
+     is an MPI process just like the parent and do bad things such as hang.
+ 
+     This context manager is a hacky way to clear those environment variables
+     temporarily, such as when we are starting multiprocessing Processes.
+ 
+     Yields:
+         Yields for the context manager
+     """
+     removed_environment = {}
+     for k, v in list(os.environ.items()):
+         for prefix in ["OMPI_", "PMI_"]:
+             if k.startswith(prefix):
+                 removed_environment[k] = v
+                 del os.environ[k]
+     try:
+         yield
+     finally:
+         os.environ.update(removed_environment)
+ 
+ 
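+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # MPI launcher variables are hidden while a child process is spawned, then
+ # restored on exit.
+ def _example_clear_mpi_env_vars():
+     import multiprocessing as mp
+ 
+     os.environ["OMPI_COMM_WORLD_RANK"] = "0"  # pretend we run under mpirun
+     with clear_mpi_env_vars():
+         assert "OMPI_COMM_WORLD_RANK" not in os.environ
+         proc = mp.Process(target=print, args=("child spawned without MPI vars",))
+         proc.start()
+         proc.join()
+     assert os.environ["OMPI_COMM_WORLD_RANK"] == "0"  # restored on exit
+ 
+ 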
+ class MarlGroupMapType(Enum):
+     """Marl Group Map Type.
+ 
+     As a feature of torchrl multiagent, you are able to control the grouping of agents in your environment.
+     You can group agents together (stacking their tensors) to leverage vectorization when passing them through the same
+     neural network. You can split agents in different groups where they are heterogeneous or should be processed by
+     different neural networks. To group, you just need to pass a ``group_map`` at env construction time.
+ 
+     Otherwise, you can choose one of the premade grouping strategies from this class.
+ 
+     - With ``group_map=MarlGroupMapType.ALL_IN_ONE_GROUP`` and
+       agents ``["agent_0", "agent_1", "agent_2", "agent_3"]``,
+       the tensordicts coming and going from your environment will look
+       something like:
+ 
+         >>> print(env.rand_action(env.reset()))
+         TensorDict(
+             fields={
+                 agents: TensorDict(
+                     fields={
+                         action: Tensor(shape=torch.Size([4, 9]), device=cpu, dtype=torch.int64, is_shared=False),
+                         done: Tensor(shape=torch.Size([4, 1]), device=cpu, dtype=torch.bool, is_shared=False),
+                         observation: Tensor(shape=torch.Size([4, 3, 3, 2]), device=cpu, dtype=torch.int8, is_shared=False)},
+                     batch_size=torch.Size([4]))},
+             batch_size=torch.Size([]))
+         >>> print(env.group_map)
+         {"agents": ["agent_0", "agent_1", "agent_2", "agent_3"]}
+ 
+     - With ``group_map=MarlGroupMapType.ONE_GROUP_PER_AGENT`` and
+       agents ``["agent_0", "agent_1", "agent_2", "agent_3"]``,
+       the tensordicts coming and going from your environment will look
+       something like:
+ 
+         >>> print(env.rand_action(env.reset()))
+         TensorDict(
+             fields={
+                 agent_0: TensorDict(
+                     fields={
+                         action: Tensor(shape=torch.Size([9]), device=cpu, dtype=torch.int64, is_shared=False),
+                         done: Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.bool, is_shared=False),
+                         observation: Tensor(shape=torch.Size([3, 3, 2]), device=cpu, dtype=torch.int8, is_shared=False)},
+                     batch_size=torch.Size([])),
+                 agent_1: TensorDict(
+                     fields={
+                         action: Tensor(shape=torch.Size([9]), device=cpu, dtype=torch.int64, is_shared=False),
+                         done: Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.bool, is_shared=False),
+                         observation: Tensor(shape=torch.Size([3, 3, 2]), device=cpu, dtype=torch.int8, is_shared=False)},
+                     batch_size=torch.Size([])),
+                 agent_2: TensorDict(
+                     fields={
+                         action: Tensor(shape=torch.Size([9]), device=cpu, dtype=torch.int64, is_shared=False),
+                         done: Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.bool, is_shared=False),
+                         observation: Tensor(shape=torch.Size([3, 3, 2]), device=cpu, dtype=torch.int8, is_shared=False)},
+                     batch_size=torch.Size([])),
+                 agent_3: TensorDict(
+                     fields={
+                         action: Tensor(shape=torch.Size([9]), device=cpu, dtype=torch.int64, is_shared=False),
+                         done: Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.bool, is_shared=False),
+                         observation: Tensor(shape=torch.Size([3, 3, 2]), device=cpu, dtype=torch.int8, is_shared=False)},
+                     batch_size=torch.Size([]))},
+             batch_size=torch.Size([]))
+         >>> print(env.group_map)
+         {"agent_0": ["agent_0"], "agent_1": ["agent_1"], "agent_2": ["agent_2"], "agent_3": ["agent_3"]}
+     """
+ 
+     ALL_IN_ONE_GROUP = 1
+     ONE_GROUP_PER_AGENT = 2
+ 
+     def get_group_map(self, agent_names: list[str]):
+         if self == MarlGroupMapType.ALL_IN_ONE_GROUP:
+             return {"agents": agent_names}
+         elif self == MarlGroupMapType.ONE_GROUP_PER_AGENT:
+             return {agent_name: [agent_name] for agent_name in agent_names}
+ 
+ 
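+ # Illustrative sketch (hypothetical helper, not part of the released file) of
+ # the two premade grouping strategies described in the docstring above.
+ def _example_group_maps():
+     agents = ["agent_0", "agent_1"]
+     assert MarlGroupMapType.ALL_IN_ONE_GROUP.get_group_map(agents) == {
+         "agents": ["agent_0", "agent_1"]
+     }
+     assert MarlGroupMapType.ONE_GROUP_PER_AGENT.get_group_map(agents) == {
+         "agent_0": ["agent_0"],
+         "agent_1": ["agent_1"],
+     }
+ 
+ 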
+ def check_marl_grouping(group_map: dict[str, list[str]], agent_names: list[str]):
+     """Check MARL group map.
+ 
+     Performs checks on the group map of a marl environment to assess its validity.
+     Raises an error in case of an invalid group_map.
+ 
+     Args:
+         group_map (Dict[str, List[str]]): the group map mapping group names to lists of agent names in the group
+         agent_names (List[str]): a list of all the agent names in the environment
+ 
+     Examples:
+         >>> from torchrl.envs.utils import MarlGroupMapType, check_marl_grouping
+         >>> agent_names = ["agent_0", "agent_1", "agent_2"]
+         >>> check_marl_grouping(MarlGroupMapType.ALL_IN_ONE_GROUP.get_group_map(agent_names), agent_names)
+ 
+     """
+     n_agents = len(agent_names)
+     if n_agents == 0:
+         raise ValueError("No agents passed")
+     if len(set(agent_names)) != n_agents:
+         raise ValueError("There are agents with the same name")
+     if len(group_map.keys()) > n_agents:
+         raise ValueError(
+             f"Number of groups {len(group_map.keys())} greater than number of agents {n_agents}"
+         )
+     found_agents = {agent_name: False for agent_name in agent_names}
+     for group_name, group in group_map.items():
+         if not len(group):
+             raise ValueError(f"Group {group_name} is empty")
+         for agent_name in group:
+             if agent_name not in found_agents:
+                 raise ValueError(f"Agent {agent_name} wasn't present in environment")
+             if not found_agents[agent_name]:
+                 found_agents[agent_name] = True
+             else:
+                 raise ValueError(f"Agent {agent_name} present more than once")
+     for agent_name, found in found_agents.items():
+         if not found:
+             raise ValueError(f"Agent {agent_name} wasn't found in any group")
+ 
+ 
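+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # a valid group map passes silently, an incomplete one raises a ValueError.
+ def _example_check_marl_grouping():
+     agents = ["agent_0", "agent_1"]
+     check_marl_grouping({"agents": agents}, agents)  # ok
+     try:
+         check_marl_grouping({"solo": ["agent_0"]}, agents)  # agent_1 unassigned
+     except ValueError as err:
+         print(err)  # Agent agent_1 wasn't found in any group
+ 
+ 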
+ def _terminated_or_truncated(
+     data: TensorDictBase,
+     full_done_spec: TensorSpec | None = None,
+     key: str | None = "_reset",
+     write_full_false: bool = False,
+ ) -> bool:
+     """Reads the done / terminated / truncated keys within a tensordict, and writes a new tensor where the values of both signals are aggregated.
+ 
+     The modification occurs in-place within the TensorDict instance provided.
+     This function can be used to compute the `"_reset"` signals in batched
+     or multiagent settings, hence the default name of the output key.
+ 
+     Args:
+         data (TensorDictBase): the input data, generally resulting from a call
+             to :meth:`~torchrl.envs.EnvBase.step`.
+         full_done_spec (TensorSpec, optional): the done_spec from the env,
+             indicating where the done leaves have to be found.
+             If not provided, the default
+             ``"done"``, ``"terminated"`` and ``"truncated"`` entries will be
+             searched for in the data.
+         key (NestedKey, optional): where the aggregated result should be written.
+             If ``None``, then the function will not write any key but just output
+             whether any of the done values was true.
+ 
+             .. note:: if a value is already present for the ``key`` entry,
+                 the previous value will prevail and no update will be achieved.
+ 
+         write_full_false (bool, optional): if ``True``, the reset keys will be
+             written even if the output is ``False`` (i.e., no done is ``True``
+             in the provided data structure).
+             Defaults to ``False``.
+ 
+     Returns: a boolean value indicating whether any of the done states found in the data
+         contained a ``True``.
+ 
+     Examples:
+         >>> from torchrl.data.tensor_specs import Categorical
+         >>> from tensordict import TensorDict
+         >>> spec = Composite(
+         ...     done=Categorical(2, dtype=torch.bool),
+         ...     truncated=Categorical(2, dtype=torch.bool),
+         ...     nested=Composite(
+         ...         done=Categorical(2, dtype=torch.bool),
+         ...         truncated=Categorical(2, dtype=torch.bool),
+         ...     )
+         ... )
+         >>> data = TensorDict({
+         ...     "done": True, "truncated": False,
+         ...     "nested": {"done": False, "truncated": True}},
+         ...     batch_size=[]
+         ... )
+         >>> any_done = _terminated_or_truncated(data, spec)
+         >>> print(data["_reset"])
+         tensor(True)
+         >>> print(data["nested", "_reset"])
+         tensor(True)
+     """
+     list_of_keys = []
+ 
+     def inner_terminated_or_truncated(data, full_done_spec, key, curr_done_key=()):
+         any_eot = False
+         aggregate = None
+         if full_done_spec is None:
+             tds = {}
+             found_leaf = 0
+             for eot_key, item in data.items():
+                 if eot_key in ("terminated", "truncated", "done"):
+                     done = item
+                     if aggregate is None:
+                         aggregate = False
+                     aggregate = aggregate | done
+                     found_leaf += 1
+                 elif isinstance(item, TensorDictBase):
+                     tds[eot_key] = item
+             # The done signals in a root td prevail over done in the leaves
+             if tds:
+                 for eot_key, item in tds.items():
+                     any_eot_td = inner_terminated_or_truncated(
+                         data=item,
+                         full_done_spec=None,
+                         key=key,
+                         curr_done_key=curr_done_key + (eot_key,),
+                     )
+                     if not found_leaf:
+                         any_eot = any_eot | any_eot_td
+         else:
+             composite_spec = {}
+             found_leaf = 0
+             for eot_key, item in full_done_spec.items():
+                 if isinstance(item, Composite):
+                     composite_spec[eot_key] = item
+                 else:
+                     found_leaf += 1
+                     stop = data.get(eot_key, None)
+                     if stop is None:
+                         stop = torch.zeros(
+                             (*data.shape, 1), dtype=torch.bool, device=data.device
+                         )
+                     if aggregate is None:
+                         aggregate = False
+                     aggregate = aggregate | stop
+             # The done signals in a root td prevail over done in the leaves
+             if composite_spec:
+                 for eot_key, item in composite_spec.items():
+                     any_eot_td = inner_terminated_or_truncated(
+                         data=data.get(eot_key),
+                         full_done_spec=item,
+                         key=key,
+                         curr_done_key=curr_done_key + (eot_key,),
+                     )
+                     if not found_leaf:
+                         any_eot = any_eot_td | any_eot
+ 
+         if aggregate is not None:
+             if key is not None:
+                 data.set(key, aggregate)
+                 list_of_keys.append(curr_done_key + (key,))
+             any_eot = any_eot | aggregate.any()
+         return any_eot
+ 
+     any_eot = inner_terminated_or_truncated(data, full_done_spec, key)
+     if not any_eot and not write_full_false:
+         # remove the list of reset keys
+         data.exclude(*list_of_keys, inplace=True)
+     return any_eot
+ 
+ 
+ def terminated_or_truncated(
+     data: TensorDictBase,
+     full_done_spec: TensorSpec | None = None,
+     key: str = "_reset",
+     write_full_false: bool = False,
+ ) -> bool:
+     """Reads the done / terminated / truncated keys within a tensordict, and writes a new tensor where the values of both signals are aggregated.
+ 
+     The modification occurs in-place within the TensorDict instance provided.
+     This function can be used to compute the `"_reset"` signals in batched
+     or multiagent settings, hence the default name of the output key.
+ 
+     Args:
+         data (TensorDictBase): the input data, generally resulting from a call
+             to :meth:`~torchrl.envs.EnvBase.step`.
+         full_done_spec (TensorSpec, optional): the done_spec from the env,
+             indicating where the done leaves have to be found.
+             If not provided, the default
+             ``"done"``, ``"terminated"`` and ``"truncated"`` entries will be
+             searched for in the data.
+         key (NestedKey, optional): where the aggregated result should be written.
+             If ``None``, then the function will not write any key but just output
+             whether any of the done values was true.
+ 
+             .. note:: if a value is already present for the ``key`` entry,
+                 the previous value will prevail and no update will be achieved.
+ 
+         write_full_false (bool, optional): if ``True``, the reset keys will be
+             written even if the output is ``False`` (i.e., no done is ``True``
+             in the provided data structure).
+             Defaults to ``False``.
+ 
+     Returns: a boolean value indicating whether any of the done states found in the data
+         contained a ``True``.
+ 
+     Examples:
+         >>> from torchrl.data.tensor_specs import Categorical
+         >>> from tensordict import TensorDict
+         >>> spec = Composite(
+         ...     done=Categorical(2, dtype=torch.bool),
+         ...     truncated=Categorical(2, dtype=torch.bool),
+         ...     nested=Composite(
+         ...         done=Categorical(2, dtype=torch.bool),
+         ...         truncated=Categorical(2, dtype=torch.bool),
+         ...     )
+         ... )
+         >>> data = TensorDict({
+         ...     "done": True, "truncated": False,
+         ...     "nested": {"done": False, "truncated": True}},
+         ...     batch_size=[]
+         ... )
+         >>> any_done = terminated_or_truncated(data, spec)
+         >>> print(data["_reset"])
+         tensor(True)
+         >>> print(data["nested", "_reset"])
+         tensor(True)
+     """
+     list_of_keys = []
+ 
+     def inner_terminated_or_truncated(data, full_done_spec, key, curr_done_key=()):
+         any_eot = False
+         aggregate = None
+         if full_done_spec is None:
+             for eot_key, item in data.items():
+                 if eot_key in ("done", "terminated", "truncated"):
+                     done = data.get(eot_key, None)
+                     if done is None:
+                         done = torch.zeros(
+                             (*data.shape, 1), dtype=torch.bool, device=data.device
+                         )
+                     if aggregate is None:
+                         aggregate = torch.tensor(False, device=done.device)
+                     aggregate = aggregate | done
+                 elif isinstance(item, TensorDictBase):
+                     any_eot = any_eot | inner_terminated_or_truncated(
+                         data=item,
+                         full_done_spec=None,
+                         key=key,
+                         curr_done_key=curr_done_key + (eot_key,),
+                     )
+         else:
+             for eot_key, item in full_done_spec.items():
+                 if isinstance(item, Composite):
+                     any_eot = any_eot | inner_terminated_or_truncated(
+                         data=data.get(eot_key),
+                         full_done_spec=item,
+                         key=key,
+                         curr_done_key=curr_done_key + (eot_key,),
+                     )
+                 else:
+                     sop = data.get(eot_key, None)
+                     if sop is None:
+                         sop = torch.zeros(
+                             (*data.shape, 1), dtype=torch.bool, device=data.device
+                         )
+                     if aggregate is None:
+                         aggregate = torch.tensor(False, device=sop.device)
+                     aggregate = aggregate | sop
+         if aggregate is not None:
+             if key is not None:
+                 data.set(key, aggregate)
+                 list_of_keys.append(curr_done_key + (key,))
+             any_eot = any_eot | aggregate.any()
+         return any_eot
+ 
+     any_eot = inner_terminated_or_truncated(data, full_done_spec, key)
+     if not any_eot and not write_full_false:
+         # remove the list of reset keys
+         data.exclude(*list_of_keys, inplace=True)
+     return any_eot
+ 
+ 
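+ # Illustrative sketch (hypothetical helper, not part of the released file) of
+ # the ``write_full_false`` flag: when no done signal is set, the all-False
+ # "_reset" entry is only kept if the flag is True.
+ def _example_write_full_false():
+     from tensordict import TensorDict
+ 
+     data = TensorDict({"done": torch.zeros(1, dtype=torch.bool)}, [])
+     assert not terminated_or_truncated(data)
+     assert "_reset" not in data.keys()  # pruned, since nothing was done
+     assert not terminated_or_truncated(data, write_full_false=True)
+     assert "_reset" in data.keys()  # kept despite being all-False
+ 
+ 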
+ PARTIAL_MISSING_ERR = "Some reset keys were present but not all. Either all the `'_reset'` entries must be present, or none."
+ 
+ 
+ def _aggregate_end_of_traj(
+     data: TensorDictBase, reset_keys=None, done_keys=None
+ ) -> torch.Tensor:
+     # goes through the tensordict and brings the _reset information to
+     # a boolean tensor of the shape of the tensordict.
+     batch_size = data.batch_size
+     n = len(batch_size)
+     if done_keys is not None and reset_keys is None:
+         reset_keys = {_replace_last(key, "done") for key in done_keys}
+     if reset_keys is not None:
+         reset = False
+         has_missing = None
+         for key in reset_keys:
+             local_reset = data.get(key, None)
+             if local_reset is None:
+                 if has_missing is False:
+                     raise ValueError(PARTIAL_MISSING_ERR)
+                 has_missing = True
+                 continue
+             elif has_missing:
+                 raise ValueError(PARTIAL_MISSING_ERR)
+             has_missing = False
+             if local_reset.ndim > n:
+                 local_reset = local_reset.flatten(n, local_reset.ndim - 1)
+                 local_reset = local_reset.any(-1)
+             reset = reset | local_reset
+         if has_missing:
+             return torch.ones(batch_size, dtype=torch.bool, device=data.device)
+         return reset
+ 
+     reset = torch.tensor(False, device=data.device)
+ 
+     def skim_through(td, reset=reset):
+         for key in td.keys():
+             if key == "_reset":
+                 local_reset = td.get(key)
+                 if local_reset.ndim > n:
+                     local_reset = local_reset.flatten(n, local_reset.ndim - 1)
+                     local_reset = local_reset.any(-1)
+                 reset = reset | local_reset
+             # we need to check the entry class without getting the value,
+             # because some lazy tensordicts may prevent calls to items().
+             # This introduces some slight overhead as when we encounter a
+             # tensordict item, we'll need to get it twice.
+             elif is_tensor_collection(td.entry_class(key)):
+                 value = td.get(key)
+                 reset = skim_through(value, reset=reset)
+         return reset
+ 
+     reset = skim_through(data)
+     return reset
+ 
+ 
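+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # "_reset" flags scattered across nesting levels are OR-ed into a single
+ # boolean mask with the tensordict's batch shape.
+ def _example_aggregate_end_of_traj():
+     from tensordict import TensorDict
+ 
+     data = TensorDict(
+         {
+             "_reset": torch.tensor([True, False]),
+             "nested": TensorDict({"_reset": torch.tensor([False, True])}, [2]),
+         },
+         [2],
+     )
+     assert _aggregate_end_of_traj(data).tolist() == [True, True]
+ 
+ 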
+ def _update_during_reset(
+     tensordict_reset: TensorDictBase,
+     tensordict: TensorDictBase,
+     reset_keys: list[NestedKey],
+ ):
+     """Updates the input tensordict with the reset data, based on the reset keys."""
+     if not reset_keys:
+         return tensordict.update(tensordict_reset)
+     roots = set()
+     for reset_key in reset_keys:
+         # get the node of the reset key
+         if isinstance(reset_key, tuple):
+             # the reset key *must* have gone through unravel_key
+             # we don't test it to avoid induced overhead
+             node_key = reset_key[:-1]
+             node_reset = tensordict_reset.get(node_key)
+             node = tensordict.get(node_key)
+             reset_key_tuple = reset_key
+         else:
+             node_reset = tensordict_reset.exclude(reset_key)
+             node = tensordict
+             reset_key_tuple = (reset_key,)
+         # get the reset signal
+         reset = tensordict.pop(reset_key, None)
+ 
+         # check if this reset should be ignored -- this happens whenever the
+         # root node has already been updated
+         root = () if isinstance(reset_key, str) else reset_key[:-1]
+         processed = any(reset_key_tuple[: len(x)] == x for x in roots)
+         roots.add(root)
+         if processed:
+             continue
+ 
+         if reset is None or reset.all():
+             # perform simple update, at a single level.
+             # by contract, a reset signal at one level cannot
+             # be followed by other resets at nested levels, so it's safe to
+             # simply update
+             node.update(node_reset, update_batch_size=True)
+         else:
+             # there can be two cases: (1) the key is present in both tds,
+             # in which case we use the reset mask to update
+             # (2) the key is not present in the input tensordict, in which
+             # case we just return the data
+ 
+             # empty tensordicts won't be returned
+             if reset.ndim > node.ndim:
+                 reset = reset.flatten(node.ndim, reset.ndim - 1)
+                 reset = reset.any(-1)
+             reset = reset.reshape(node.shape)
+             # node.update(node.where(~reset, other=node_reset, pad=0))
+             node.where(
+                 ~reset, other=node_reset, out=node, pad=0, update_batch_size=True
+             )
+             # node = node.clone()
+             # idx = reset.nonzero(as_tuple=True)[0]
+             # node[idx].update(node_reset[idx])
+             # node["done"] = torch.zeros((*node.shape, 1), dtype=torch.bool)
+ 
+     return tensordict
+ 
+ 
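+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # with a partial "_reset" mask, only the flagged batch entries take the
+ # freshly reset values.
+ def _example_update_during_reset():
+     from tensordict import TensorDict
+ 
+     td = TensorDict(
+         {"obs": torch.zeros(2), "_reset": torch.tensor([True, False])}, [2]
+     )
+     td_reset = TensorDict({"obs": torch.ones(2)}, [2])
+     out = _update_during_reset(td_reset, td, reset_keys=["_reset"])
+     assert out["obs"].tolist() == [1.0, 0.0]  # only index 0 was reset
+ 
+ 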
+ def _repr_by_depth(key):
+     """Used to sort keys based on nesting level."""
+     key = unravel_key(key)
+     if isinstance(key, str):
+         return (0, key)
+     else:
+         return (len(key) - 1, ".".join(key))
+ 
+ 
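+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # keys sort by nesting depth first, then lexicographically.
+ def _example_repr_by_depth():
+     keys = [("nested", "done"), "done", ("a", "b", "c")]
+     assert sorted(keys, key=_repr_by_depth) == [
+         "done",
+         ("nested", "done"),
+         ("a", "b", "c"),
+     ]
+ 
+ 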
+ def _make_compatible_policy(
+     policy,
+     observation_spec,
+     env=None,
+     fast_wrap=False,
+     trust_policy=False,
+     env_maker=None,
+     env_maker_kwargs=None,
+ ):
+     if trust_policy or isinstance(policy, torch._dynamo.eval_frame.OptimizedModule):
+         return policy
+     if policy is None:
+         input_spec = None
+         if env_maker is not None:
+             from torchrl.envs import EnvBase, EnvCreator
+ 
+             if isinstance(env_maker, EnvBase):
+                 env = env_maker
+                 input_spec = env.input_spec["full_action_spec"]
+             elif isinstance(env_maker, EnvCreator):
+                 input_spec = env_maker._meta_data.specs[
+                     "input_spec", "full_action_spec"
+                 ]
+             else:
+                 env = env_maker(**env_maker_kwargs)
+                 input_spec = env.full_action_spec
+         if input_spec is None:
+             if env is not None:
+                 input_spec = env.input_spec["full_action_spec"]
+             else:
+                 raise ValueError(
+                     "env must be provided to _make_compatible_policy if policy is None"
+                 )
+ 
+         policy = RandomPolicy(input_spec)
+ 
+     # make sure policy is an nn.Module - this will return the same policy if conditions are met
+     # policy = CloudpickleWrapper(policy)
+ 
+     caller = getattr(policy, "forward", policy)
+ 
+     if not _policy_is_tensordict_compatible(policy):
+         if observation_spec is None:
+             if env is not None:
+                 observation_spec = env.observation_spec
+             elif env_maker is not None:
+                 from torchrl.envs import EnvBase, EnvCreator
+ 
+                 if isinstance(env_maker, EnvBase):
+                     observation_spec = env_maker.observation_spec
+                 elif isinstance(env_maker, EnvCreator):
+                     observation_spec = env_maker._meta_data.specs[
+                         "output_spec", "full_observation_spec"
+                     ]
+                 else:
+                     observation_spec = env_maker(**env_maker_kwargs).observation_spec
+ 
+         # policy is a nn.Module that doesn't operate on tensordicts directly
+         # so we attempt to auto-wrap policy with TensorDictModule
+         if observation_spec is None:
+             raise ValueError(
+                 "Unable to read observation_spec from the environment. This is "
+                 "required to check compatibility of the environment and policy "
+                 "since the policy is a nn.Module that operates on tensors "
+                 "rather than a TensorDictModule or a nn.Module that accepts a "
+                 "TensorDict as input and defines in_keys and out_keys. "
+                 "If your policy is compatible with the environment, you can solve this warning by setting "
+                 "trust_policy=True in the constructor."
+             )
+ 
+         try:
+             sig = caller.__signature__
+         except AttributeError:
+             sig = inspect.signature(caller)
+         # we check if all the mandatory params are there
+         params = list(sig.parameters.keys())
+         if (
+             set(sig.parameters) == {"tensordict"}
+             or set(sig.parameters) == {"td"}
+             or (
+                 len(params) == 1
+                 and is_tensor_collection(sig.parameters[params[0]].annotation)
+             )
+         ):
+             return policy
+         if fast_wrap:
+             in_keys = list(observation_spec.keys())
+             out_keys = list(env.action_keys)
+             return TensorDictModule(policy, in_keys=in_keys, out_keys=out_keys)
+ 
+         required_kwargs = {
+             str(k) for k, p in sig.parameters.items() if p.default is inspect._empty
+         }
+         next_observation = {
+             key: value for key, value in observation_spec.rand().items()
+         }
+         if not required_kwargs.difference(set(next_observation)):
+             in_keys = [str(k) for k in sig.parameters if k in next_observation]
+             if env is None:
+                 out_keys = ["action"]
+             else:
+                 out_keys = list(env.action_keys)
+             for p in getattr(policy, "parameters", list)():
+                 policy_device = p.device
+                 break
+             else:
+                 policy_device = None
+             if policy_device:
+                 next_observation = tree_map(
+                     lambda x: x.to(policy_device), next_observation
+                 )
+ 
+             output = policy(**next_observation)
+ 
+             if isinstance(output, tuple):
+                 out_keys.extend(f"output{i + 1}" for i in range(len(output) - 1))
+ 
+             policy = TensorDictModule(policy, in_keys=in_keys, out_keys=out_keys)
+         else:
+             raise TypeError(
+                 f"""This error is raised because TorchRL tried to automatically wrap your policy in
+ a TensorDictModule. If you're confident the policy can directly process environment outputs, set
+ the `trust_policy` argument to `True` in the constructor.
+ 
+ Arguments to policy.forward are incompatible with entries in
+ env.observation_spec (got incongruent signatures:
+ the function signature is {set(sig.parameters)} but the specs have keys {set(next_observation)}).
+ If you want TorchRL to automatically wrap your policy with a TensorDictModule
+ then the arguments to policy.forward must correspond one-to-one with entries
+ in env.observation_spec.
+ For more complex behavior and more control you can consider writing your
+ own TensorDictModule.
+ Check the collector documentation to know more about accepted policies.
+ """
+             )
+     return policy
+ 
+ 
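+ # Illustrative sketch (hypothetical helper, not part of the released file);
+ # it assumes gymnasium is installed. A plain nn.Module whose forward argument
+ # matches an observation entry gets auto-wrapped into a TensorDictModule.
+ def _example_make_compatible_policy():
+     from torchrl.envs.libs.gym import GymEnv
+ 
+     env = GymEnv("Pendulum-v1")
+ 
+     class Net(nn.Module):
+         def __init__(self):
+             super().__init__()
+             self.lin = nn.Linear(3, 1)  # Pendulum: 3 obs dims, 1 action dim
+ 
+         def forward(self, observation):
+             return self.lin(observation)
+ 
+     policy = _make_compatible_policy(Net(), env.observation_spec, env=env)
+     # the wrapped policy now reads "observation" and writes "action"
+     print(policy(env.reset()))
+ 
+ 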
+ def _policy_is_tensordict_compatible(policy: nn.Module):
+     def is_compatible(policy):
+         return isinstance(policy, (RandomPolicy, TensorDictModuleBase))
+ 
+     if (
+         is_compatible(policy)
+         or (
+             isinstance(policy, _NonParametricPolicyWrapper)
+             and is_compatible(policy.policy)
+         )
+         or (isinstance(policy, CloudpickleWrapper) and is_compatible(policy.fn))
+     ):
+         return True
+ 
+     sig = inspect.signature(getattr(policy, "forward", policy))
+ 
+     if (
+         len(sig.parameters) == 1
+         and hasattr(policy, "in_keys")
+         and hasattr(policy, "out_keys")
+     ):
+         raise RuntimeError(
+             "Passing a policy that is not a tensordict.nn.TensorDictModuleBase subclass but has in_keys and out_keys "
+             "is deprecated. Users should inherit from this class (which "
+             "has very few restrictions) to make the experience smoother. "
+             "Simply change your policy from `class Policy(nn.Module)` to `class Policy(tensordict.nn.TensorDictModuleBase)` "
+             "and this error should disappear.",
+         )
+     elif not hasattr(policy, "in_keys") and not hasattr(policy, "out_keys"):
+         # if it's not a TensorDictModule, and in_keys and out_keys are not defined then
+         # we assume no TensorDict compatibility and will try to wrap it.
+         return False
+ 
+     # if in_keys or out_keys were defined but policy is not a TensorDictModule or
+     # accepts multiple arguments then it's likely the user is trying to do something
+     # that will have undetermined behavior, we raise an error
+     raise TypeError(
+         "Received a policy that defines in_keys or out_keys and also expects multiple "
+         "arguments to policy.forward. If the policy is compatible with TensorDict, it "
+         "should take a single argument of type TensorDict to policy.forward and define "
+         "both in_keys and out_keys. Alternatively, policy.forward can accept "
+         "arbitrarily many tensor inputs and leave in_keys and out_keys undefined and "
+         "TorchRL will attempt to automatically wrap the policy with a TensorDictModule."
+     )
+ 
+ 
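+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # a TensorDictModule passes the compatibility check, a plain module without
+ # in_keys/out_keys does not and will be wrapped instead.
+ def _example_policy_compat():
+     from tensordict.nn import TensorDictModule
+ 
+     td_policy = TensorDictModule(
+         nn.LazyLinear(1), in_keys=["obs"], out_keys=["action"]
+     )
+     assert _policy_is_tensordict_compatible(td_policy)
+     assert not _policy_is_tensordict_compatible(nn.LazyLinear(1))
+ 
+ 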
+ class _PolicyMetaClass(abc.ABCMeta):
+     def __call__(cls, *args, **kwargs):
+         # no kwargs
+         if isinstance(args[0], nn.Module):
+             return args[0]
+         return super().__call__(*args)
+ 
+ 
+ class _NonParametricPolicyWrapper(nn.Module, metaclass=_PolicyMetaClass):
+     """A wrapper for non-parametric policies."""
+ 
+     def __init__(self, policy):
+         super().__init__()
+         functools.update_wrapper(self, policy)
+         self.policy = CloudpickleWrapper(policy)
+         if hasattr(policy, "forward"):
+             self.forward = self.policy.forward
+ 
+     def __getattr__(self, attr: str) -> Any:
+         if attr in self.__dir__():
+             return self.__getattribute__(
+                 attr
+             )  # make sure that appropriate exceptions are raised
+ 
+         elif attr.startswith("__"):
+             raise AttributeError(
+                 "passing built-in private methods is "
+                 f"not permitted with type {type(self)}. "
+                 f"Got attribute {attr}."
+             )
+ 
+         elif "policy" in self.__dir__():
+             policy = self.__getattribute__("policy")
+             return getattr(policy, attr)
+         try:
+             return super().__getattr__(attr)
+         except Exception:
+             raise AttributeError(
+                 f"The policy wasn't set in {self.__class__.__name__}, cannot access {attr}."
+             )
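+ 
+ 
+ # Illustrative sketch (hypothetical helper, not part of the released file):
+ # thanks to _PolicyMetaClass, nn.Module policies pass through the wrapper
+ # untouched rather than being wrapped.
+ def _example_policy_wrapper_passthrough():
+     net = nn.Linear(3, 1)
+     assert _NonParametricPolicyWrapper(net) is net  # returned as-is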