torchrl-0.11.0-cp314-cp314-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. benchmarks/benchmark_batched_envs.py +104 -0
  2. benchmarks/conftest.py +91 -0
  3. benchmarks/ecosystem/gym_env_throughput.py +321 -0
  4. benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
  5. benchmarks/requirements.txt +7 -0
  6. benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
  7. benchmarks/test_collectors_benchmark.py +240 -0
  8. benchmarks/test_compressed_storage_benchmark.py +145 -0
  9. benchmarks/test_envs_benchmark.py +133 -0
  10. benchmarks/test_llm.py +101 -0
  11. benchmarks/test_non_tensor_env_benchmark.py +70 -0
  12. benchmarks/test_objectives_benchmarks.py +1199 -0
  13. benchmarks/test_replaybuffer_benchmark.py +254 -0
  14. sota-check/README.md +35 -0
  15. sota-implementations/README.md +142 -0
  16. sota-implementations/a2c/README.md +39 -0
  17. sota-implementations/a2c/a2c_atari.py +291 -0
  18. sota-implementations/a2c/a2c_mujoco.py +273 -0
  19. sota-implementations/a2c/utils_atari.py +240 -0
  20. sota-implementations/a2c/utils_mujoco.py +160 -0
  21. sota-implementations/bandits/README.md +7 -0
  22. sota-implementations/bandits/dqn.py +126 -0
  23. sota-implementations/cql/cql_offline.py +198 -0
  24. sota-implementations/cql/cql_online.py +249 -0
  25. sota-implementations/cql/discrete_cql_offline.py +180 -0
  26. sota-implementations/cql/discrete_cql_online.py +227 -0
  27. sota-implementations/cql/utils.py +471 -0
  28. sota-implementations/crossq/crossq.py +271 -0
  29. sota-implementations/crossq/utils.py +320 -0
  30. sota-implementations/ddpg/ddpg.py +231 -0
  31. sota-implementations/ddpg/utils.py +325 -0
  32. sota-implementations/decision_transformer/dt.py +163 -0
  33. sota-implementations/decision_transformer/lamb.py +167 -0
  34. sota-implementations/decision_transformer/online_dt.py +178 -0
  35. sota-implementations/decision_transformer/utils.py +562 -0
  36. sota-implementations/discrete_sac/discrete_sac.py +243 -0
  37. sota-implementations/discrete_sac/utils.py +324 -0
  38. sota-implementations/dqn/README.md +30 -0
  39. sota-implementations/dqn/dqn_atari.py +272 -0
  40. sota-implementations/dqn/dqn_cartpole.py +236 -0
  41. sota-implementations/dqn/utils_atari.py +132 -0
  42. sota-implementations/dqn/utils_cartpole.py +90 -0
  43. sota-implementations/dreamer/README.md +129 -0
  44. sota-implementations/dreamer/dreamer.py +586 -0
  45. sota-implementations/dreamer/dreamer_utils.py +1107 -0
  46. sota-implementations/expert-iteration/README.md +352 -0
  47. sota-implementations/expert-iteration/ei_utils.py +770 -0
  48. sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
  49. sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
  50. sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
  51. sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
  52. sota-implementations/gail/gail.py +327 -0
  53. sota-implementations/gail/gail_utils.py +68 -0
  54. sota-implementations/gail/ppo_utils.py +157 -0
  55. sota-implementations/grpo/README.md +273 -0
  56. sota-implementations/grpo/grpo-async.py +437 -0
  57. sota-implementations/grpo/grpo-sync.py +435 -0
  58. sota-implementations/grpo/grpo_utils.py +843 -0
  59. sota-implementations/grpo/requirements_gsm8k.txt +11 -0
  60. sota-implementations/grpo/requirements_ifeval.txt +16 -0
  61. sota-implementations/impala/README.md +33 -0
  62. sota-implementations/impala/impala_multi_node_ray.py +292 -0
  63. sota-implementations/impala/impala_multi_node_submitit.py +284 -0
  64. sota-implementations/impala/impala_single_node.py +261 -0
  65. sota-implementations/impala/utils.py +184 -0
  66. sota-implementations/iql/discrete_iql.py +230 -0
  67. sota-implementations/iql/iql_offline.py +164 -0
  68. sota-implementations/iql/iql_online.py +225 -0
  69. sota-implementations/iql/utils.py +437 -0
  70. sota-implementations/multiagent/README.md +74 -0
  71. sota-implementations/multiagent/iql.py +237 -0
  72. sota-implementations/multiagent/maddpg_iddpg.py +266 -0
  73. sota-implementations/multiagent/mappo_ippo.py +267 -0
  74. sota-implementations/multiagent/qmix_vdn.py +271 -0
  75. sota-implementations/multiagent/sac.py +337 -0
  76. sota-implementations/multiagent/utils/__init__.py +4 -0
  77. sota-implementations/multiagent/utils/logging.py +151 -0
  78. sota-implementations/multiagent/utils/utils.py +43 -0
  79. sota-implementations/ppo/README.md +29 -0
  80. sota-implementations/ppo/ppo_atari.py +305 -0
  81. sota-implementations/ppo/ppo_mujoco.py +293 -0
  82. sota-implementations/ppo/utils_atari.py +238 -0
  83. sota-implementations/ppo/utils_mujoco.py +152 -0
  84. sota-implementations/ppo_trainer/train.py +21 -0
  85. sota-implementations/redq/README.md +7 -0
  86. sota-implementations/redq/redq.py +199 -0
  87. sota-implementations/redq/utils.py +1060 -0
  88. sota-implementations/sac/sac-async.py +266 -0
  89. sota-implementations/sac/sac.py +239 -0
  90. sota-implementations/sac/utils.py +381 -0
  91. sota-implementations/sac_trainer/train.py +16 -0
  92. sota-implementations/td3/td3.py +254 -0
  93. sota-implementations/td3/utils.py +319 -0
  94. sota-implementations/td3_bc/td3_bc.py +177 -0
  95. sota-implementations/td3_bc/utils.py +251 -0
  96. torchrl/.dylibs/libc++.1.0.dylib +0 -0
  97. torchrl/__init__.py +144 -0
  98. torchrl/_extension.py +74 -0
  99. torchrl/_torchrl.cpython-314-darwin.so +0 -0
  100. torchrl/_utils.py +1431 -0
  101. torchrl/collectors/__init__.py +48 -0
  102. torchrl/collectors/_base.py +1058 -0
  103. torchrl/collectors/_constants.py +88 -0
  104. torchrl/collectors/_multi_async.py +324 -0
  105. torchrl/collectors/_multi_base.py +1805 -0
  106. torchrl/collectors/_multi_sync.py +464 -0
  107. torchrl/collectors/_runner.py +581 -0
  108. torchrl/collectors/_single.py +2009 -0
  109. torchrl/collectors/_single_async.py +259 -0
  110. torchrl/collectors/collectors.py +62 -0
  111. torchrl/collectors/distributed/__init__.py +32 -0
  112. torchrl/collectors/distributed/default_configs.py +133 -0
  113. torchrl/collectors/distributed/generic.py +1306 -0
  114. torchrl/collectors/distributed/ray.py +1092 -0
  115. torchrl/collectors/distributed/rpc.py +1006 -0
  116. torchrl/collectors/distributed/sync.py +731 -0
  117. torchrl/collectors/distributed/utils.py +160 -0
  118. torchrl/collectors/llm/__init__.py +10 -0
  119. torchrl/collectors/llm/base.py +494 -0
  120. torchrl/collectors/llm/ray_collector.py +275 -0
  121. torchrl/collectors/llm/utils.py +36 -0
  122. torchrl/collectors/llm/weight_update/__init__.py +10 -0
  123. torchrl/collectors/llm/weight_update/vllm.py +348 -0
  124. torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
  125. torchrl/collectors/utils.py +433 -0
  126. torchrl/collectors/weight_update.py +591 -0
  127. torchrl/csrc/numpy_utils.h +38 -0
  128. torchrl/csrc/pybind.cpp +27 -0
  129. torchrl/csrc/segment_tree.h +458 -0
  130. torchrl/csrc/torch_utils.h +34 -0
  131. torchrl/csrc/utils.cpp +48 -0
  132. torchrl/csrc/utils.h +31 -0
  133. torchrl/data/__init__.py +187 -0
  134. torchrl/data/datasets/__init__.py +58 -0
  135. torchrl/data/datasets/atari_dqn.py +878 -0
  136. torchrl/data/datasets/common.py +281 -0
  137. torchrl/data/datasets/d4rl.py +489 -0
  138. torchrl/data/datasets/d4rl_infos.py +187 -0
  139. torchrl/data/datasets/gen_dgrl.py +375 -0
  140. torchrl/data/datasets/minari_data.py +643 -0
  141. torchrl/data/datasets/openml.py +177 -0
  142. torchrl/data/datasets/openx.py +798 -0
  143. torchrl/data/datasets/roboset.py +363 -0
  144. torchrl/data/datasets/utils.py +11 -0
  145. torchrl/data/datasets/vd4rl.py +432 -0
  146. torchrl/data/llm/__init__.py +34 -0
  147. torchrl/data/llm/dataset.py +491 -0
  148. torchrl/data/llm/history.py +1378 -0
  149. torchrl/data/llm/prompt.py +198 -0
  150. torchrl/data/llm/reward.py +225 -0
  151. torchrl/data/llm/topk.py +186 -0
  152. torchrl/data/llm/utils.py +543 -0
  153. torchrl/data/map/__init__.py +21 -0
  154. torchrl/data/map/hash.py +185 -0
  155. torchrl/data/map/query.py +204 -0
  156. torchrl/data/map/tdstorage.py +363 -0
  157. torchrl/data/map/tree.py +1434 -0
  158. torchrl/data/map/utils.py +103 -0
  159. torchrl/data/postprocs/__init__.py +8 -0
  160. torchrl/data/postprocs/postprocs.py +391 -0
  161. torchrl/data/replay_buffers/__init__.py +99 -0
  162. torchrl/data/replay_buffers/checkpointers.py +622 -0
  163. torchrl/data/replay_buffers/ray_buffer.py +292 -0
  164. torchrl/data/replay_buffers/replay_buffers.py +2376 -0
  165. torchrl/data/replay_buffers/samplers.py +2578 -0
  166. torchrl/data/replay_buffers/scheduler.py +265 -0
  167. torchrl/data/replay_buffers/storages.py +2412 -0
  168. torchrl/data/replay_buffers/utils.py +1042 -0
  169. torchrl/data/replay_buffers/writers.py +781 -0
  170. torchrl/data/tensor_specs.py +7101 -0
  171. torchrl/data/utils.py +334 -0
  172. torchrl/envs/__init__.py +265 -0
  173. torchrl/envs/async_envs.py +1105 -0
  174. torchrl/envs/batched_envs.py +3093 -0
  175. torchrl/envs/common.py +4241 -0
  176. torchrl/envs/custom/__init__.py +11 -0
  177. torchrl/envs/custom/chess.py +617 -0
  178. torchrl/envs/custom/llm.py +214 -0
  179. torchrl/envs/custom/pendulum.py +401 -0
  180. torchrl/envs/custom/san_moves.txt +29274 -0
  181. torchrl/envs/custom/tictactoeenv.py +288 -0
  182. torchrl/envs/env_creator.py +263 -0
  183. torchrl/envs/gym_like.py +752 -0
  184. torchrl/envs/libs/__init__.py +68 -0
  185. torchrl/envs/libs/_gym_utils.py +326 -0
  186. torchrl/envs/libs/brax.py +846 -0
  187. torchrl/envs/libs/dm_control.py +544 -0
  188. torchrl/envs/libs/envpool.py +447 -0
  189. torchrl/envs/libs/gym.py +2239 -0
  190. torchrl/envs/libs/habitat.py +138 -0
  191. torchrl/envs/libs/isaac_lab.py +87 -0
  192. torchrl/envs/libs/isaacgym.py +203 -0
  193. torchrl/envs/libs/jax_utils.py +166 -0
  194. torchrl/envs/libs/jumanji.py +963 -0
  195. torchrl/envs/libs/meltingpot.py +599 -0
  196. torchrl/envs/libs/openml.py +153 -0
  197. torchrl/envs/libs/openspiel.py +652 -0
  198. torchrl/envs/libs/pettingzoo.py +1042 -0
  199. torchrl/envs/libs/procgen.py +351 -0
  200. torchrl/envs/libs/robohive.py +429 -0
  201. torchrl/envs/libs/smacv2.py +645 -0
  202. torchrl/envs/libs/unity_mlagents.py +891 -0
  203. torchrl/envs/libs/utils.py +147 -0
  204. torchrl/envs/libs/vmas.py +813 -0
  205. torchrl/envs/llm/__init__.py +63 -0
  206. torchrl/envs/llm/chat.py +730 -0
  207. torchrl/envs/llm/datasets/README.md +4 -0
  208. torchrl/envs/llm/datasets/__init__.py +17 -0
  209. torchrl/envs/llm/datasets/gsm8k.py +353 -0
  210. torchrl/envs/llm/datasets/ifeval.py +274 -0
  211. torchrl/envs/llm/envs.py +789 -0
  212. torchrl/envs/llm/libs/README.md +3 -0
  213. torchrl/envs/llm/libs/__init__.py +8 -0
  214. torchrl/envs/llm/libs/mlgym.py +869 -0
  215. torchrl/envs/llm/reward/__init__.py +10 -0
  216. torchrl/envs/llm/reward/gsm8k.py +324 -0
  217. torchrl/envs/llm/reward/ifeval/README.md +13 -0
  218. torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
  219. torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
  220. torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
  221. torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
  222. torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
  223. torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
  224. torchrl/envs/llm/transforms/__init__.py +55 -0
  225. torchrl/envs/llm/transforms/browser.py +292 -0
  226. torchrl/envs/llm/transforms/dataloading.py +859 -0
  227. torchrl/envs/llm/transforms/format.py +73 -0
  228. torchrl/envs/llm/transforms/kl.py +1544 -0
  229. torchrl/envs/llm/transforms/policy_version.py +189 -0
  230. torchrl/envs/llm/transforms/reason.py +323 -0
  231. torchrl/envs/llm/transforms/tokenizer.py +321 -0
  232. torchrl/envs/llm/transforms/tools.py +1955 -0
  233. torchrl/envs/model_based/__init__.py +9 -0
  234. torchrl/envs/model_based/common.py +180 -0
  235. torchrl/envs/model_based/dreamer.py +112 -0
  236. torchrl/envs/transforms/__init__.py +147 -0
  237. torchrl/envs/transforms/functional.py +48 -0
  238. torchrl/envs/transforms/gym_transforms.py +203 -0
  239. torchrl/envs/transforms/module.py +341 -0
  240. torchrl/envs/transforms/r3m.py +372 -0
  241. torchrl/envs/transforms/ray_service.py +663 -0
  242. torchrl/envs/transforms/rb_transforms.py +214 -0
  243. torchrl/envs/transforms/transforms.py +11835 -0
  244. torchrl/envs/transforms/utils.py +94 -0
  245. torchrl/envs/transforms/vc1.py +307 -0
  246. torchrl/envs/transforms/vecnorm.py +845 -0
  247. torchrl/envs/transforms/vip.py +407 -0
  248. torchrl/envs/utils.py +1718 -0
  249. torchrl/envs/vec_envs.py +11 -0
  250. torchrl/modules/__init__.py +206 -0
  251. torchrl/modules/distributions/__init__.py +73 -0
  252. torchrl/modules/distributions/continuous.py +830 -0
  253. torchrl/modules/distributions/discrete.py +908 -0
  254. torchrl/modules/distributions/truncated_normal.py +187 -0
  255. torchrl/modules/distributions/utils.py +233 -0
  256. torchrl/modules/llm/__init__.py +62 -0
  257. torchrl/modules/llm/backends/__init__.py +65 -0
  258. torchrl/modules/llm/backends/vllm/__init__.py +94 -0
  259. torchrl/modules/llm/backends/vllm/_models.py +46 -0
  260. torchrl/modules/llm/backends/vllm/base.py +72 -0
  261. torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
  262. torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
  263. torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
  264. torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
  265. torchrl/modules/llm/policies/__init__.py +28 -0
  266. torchrl/modules/llm/policies/common.py +1809 -0
  267. torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
  268. torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
  269. torchrl/modules/llm/utils.py +23 -0
  270. torchrl/modules/mcts/__init__.py +21 -0
  271. torchrl/modules/mcts/scores.py +579 -0
  272. torchrl/modules/models/__init__.py +86 -0
  273. torchrl/modules/models/batchrenorm.py +119 -0
  274. torchrl/modules/models/decision_transformer.py +179 -0
  275. torchrl/modules/models/exploration.py +731 -0
  276. torchrl/modules/models/llm.py +156 -0
  277. torchrl/modules/models/model_based.py +596 -0
  278. torchrl/modules/models/models.py +1712 -0
  279. torchrl/modules/models/multiagent.py +1067 -0
  280. torchrl/modules/models/recipes/impala.py +185 -0
  281. torchrl/modules/models/utils.py +162 -0
  282. torchrl/modules/planners/__init__.py +10 -0
  283. torchrl/modules/planners/cem.py +228 -0
  284. torchrl/modules/planners/common.py +73 -0
  285. torchrl/modules/planners/mppi.py +265 -0
  286. torchrl/modules/tensordict_module/__init__.py +89 -0
  287. torchrl/modules/tensordict_module/actors.py +2457 -0
  288. torchrl/modules/tensordict_module/common.py +529 -0
  289. torchrl/modules/tensordict_module/exploration.py +814 -0
  290. torchrl/modules/tensordict_module/probabilistic.py +321 -0
  291. torchrl/modules/tensordict_module/rnn.py +1639 -0
  292. torchrl/modules/tensordict_module/sequence.py +132 -0
  293. torchrl/modules/tensordict_module/world_models.py +34 -0
  294. torchrl/modules/utils/__init__.py +38 -0
  295. torchrl/modules/utils/mappings.py +9 -0
  296. torchrl/modules/utils/utils.py +89 -0
  297. torchrl/objectives/__init__.py +78 -0
  298. torchrl/objectives/a2c.py +659 -0
  299. torchrl/objectives/common.py +753 -0
  300. torchrl/objectives/cql.py +1346 -0
  301. torchrl/objectives/crossq.py +710 -0
  302. torchrl/objectives/ddpg.py +453 -0
  303. torchrl/objectives/decision_transformer.py +371 -0
  304. torchrl/objectives/deprecated.py +516 -0
  305. torchrl/objectives/dqn.py +683 -0
  306. torchrl/objectives/dreamer.py +488 -0
  307. torchrl/objectives/functional.py +48 -0
  308. torchrl/objectives/gail.py +258 -0
  309. torchrl/objectives/iql.py +996 -0
  310. torchrl/objectives/llm/__init__.py +30 -0
  311. torchrl/objectives/llm/grpo.py +846 -0
  312. torchrl/objectives/llm/sft.py +482 -0
  313. torchrl/objectives/multiagent/__init__.py +8 -0
  314. torchrl/objectives/multiagent/qmixer.py +396 -0
  315. torchrl/objectives/ppo.py +1669 -0
  316. torchrl/objectives/redq.py +683 -0
  317. torchrl/objectives/reinforce.py +530 -0
  318. torchrl/objectives/sac.py +1580 -0
  319. torchrl/objectives/td3.py +570 -0
  320. torchrl/objectives/td3_bc.py +625 -0
  321. torchrl/objectives/utils.py +782 -0
  322. torchrl/objectives/value/__init__.py +28 -0
  323. torchrl/objectives/value/advantages.py +1956 -0
  324. torchrl/objectives/value/functional.py +1459 -0
  325. torchrl/objectives/value/utils.py +360 -0
  326. torchrl/record/__init__.py +17 -0
  327. torchrl/record/loggers/__init__.py +23 -0
  328. torchrl/record/loggers/common.py +48 -0
  329. torchrl/record/loggers/csv.py +226 -0
  330. torchrl/record/loggers/mlflow.py +142 -0
  331. torchrl/record/loggers/tensorboard.py +139 -0
  332. torchrl/record/loggers/trackio.py +163 -0
  333. torchrl/record/loggers/utils.py +78 -0
  334. torchrl/record/loggers/wandb.py +214 -0
  335. torchrl/record/recorder.py +554 -0
  336. torchrl/services/__init__.py +79 -0
  337. torchrl/services/base.py +109 -0
  338. torchrl/services/ray_service.py +453 -0
  339. torchrl/testing/__init__.py +107 -0
  340. torchrl/testing/assertions.py +179 -0
  341. torchrl/testing/dist_utils.py +122 -0
  342. torchrl/testing/env_creators.py +227 -0
  343. torchrl/testing/env_helper.py +35 -0
  344. torchrl/testing/gym_helpers.py +156 -0
  345. torchrl/testing/llm_mocks.py +119 -0
  346. torchrl/testing/mocking_classes.py +2720 -0
  347. torchrl/testing/modules.py +295 -0
  348. torchrl/testing/mp_helpers.py +15 -0
  349. torchrl/testing/ray_helpers.py +293 -0
  350. torchrl/testing/utils.py +190 -0
  351. torchrl/trainers/__init__.py +42 -0
  352. torchrl/trainers/algorithms/__init__.py +11 -0
  353. torchrl/trainers/algorithms/configs/__init__.py +705 -0
  354. torchrl/trainers/algorithms/configs/collectors.py +216 -0
  355. torchrl/trainers/algorithms/configs/common.py +41 -0
  356. torchrl/trainers/algorithms/configs/data.py +308 -0
  357. torchrl/trainers/algorithms/configs/envs.py +104 -0
  358. torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
  359. torchrl/trainers/algorithms/configs/logging.py +80 -0
  360. torchrl/trainers/algorithms/configs/modules.py +570 -0
  361. torchrl/trainers/algorithms/configs/objectives.py +177 -0
  362. torchrl/trainers/algorithms/configs/trainers.py +340 -0
  363. torchrl/trainers/algorithms/configs/transforms.py +955 -0
  364. torchrl/trainers/algorithms/configs/utils.py +252 -0
  365. torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
  366. torchrl/trainers/algorithms/configs/weight_update.py +159 -0
  367. torchrl/trainers/algorithms/ppo.py +373 -0
  368. torchrl/trainers/algorithms/sac.py +308 -0
  369. torchrl/trainers/helpers/__init__.py +40 -0
  370. torchrl/trainers/helpers/collectors.py +416 -0
  371. torchrl/trainers/helpers/envs.py +573 -0
  372. torchrl/trainers/helpers/logger.py +33 -0
  373. torchrl/trainers/helpers/losses.py +132 -0
  374. torchrl/trainers/helpers/models.py +658 -0
  375. torchrl/trainers/helpers/replay_buffer.py +59 -0
  376. torchrl/trainers/helpers/trainers.py +301 -0
  377. torchrl/trainers/trainers.py +2052 -0
  378. torchrl/weight_update/__init__.py +33 -0
  379. torchrl/weight_update/_distributed.py +749 -0
  380. torchrl/weight_update/_mp.py +624 -0
  381. torchrl/weight_update/_noupdate.py +102 -0
  382. torchrl/weight_update/_ray.py +1032 -0
  383. torchrl/weight_update/_rpc.py +284 -0
  384. torchrl/weight_update/_shared.py +891 -0
  385. torchrl/weight_update/llm/__init__.py +32 -0
  386. torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
  387. torchrl/weight_update/llm/vllm_nccl.py +710 -0
  388. torchrl/weight_update/utils.py +73 -0
  389. torchrl/weight_update/weight_sync_schemes.py +1244 -0
  390. torchrl-0.11.0.dist-info/METADATA +1308 -0
  391. torchrl-0.11.0.dist-info/RECORD +395 -0
  392. torchrl-0.11.0.dist-info/WHEEL +5 -0
  393. torchrl-0.11.0.dist-info/entry_points.txt +2 -0
  394. torchrl-0.11.0.dist-info/licenses/LICENSE +21 -0
  395. torchrl-0.11.0.dist-info/top_level.txt +7 -0
torchrl/objectives/ppo.py
@@ -0,0 +1,1669 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+from __future__ import annotations
+
+import contextlib
+import warnings
+from collections.abc import Mapping
+from copy import deepcopy
+from dataclasses import dataclass
+
+import torch
+from tensordict import (
+    is_tensor_collection,
+    TensorDict,
+    TensorDictBase,
+    TensorDictParams,
+)
+from tensordict.nn import (
+    composite_lp_aggregate,
+    CompositeDistribution,
+    dispatch,
+    ProbabilisticTensorDictModule,
+    ProbabilisticTensorDictSequential,
+    set_composite_lp_aggregate,
+    TensorDictModule,
+)
+from tensordict.utils import NestedKey
+from torch import distributions as d
+
+from torchrl._utils import _standardize, logger as torchrl_logger, VERBOSE
+from torchrl.objectives.common import LossModule
+from torchrl.objectives.utils import (
+    _cache_values,
+    _clip_value_loss,
+    _GAMMA_LMBDA_DEPREC_ERROR,
+    _maybe_add_or_extend_key,
+    _maybe_get_or_select,
+    _reduce,
+    _sum_td_features,
+    default_value_kwargs,
+    distance_loss,
+    ValueEstimators,
+)
+from torchrl.objectives.value import (
+    GAE,
+    TD0Estimator,
+    TD1Estimator,
+    TDLambdaEstimator,
+    ValueEstimatorBase,
+    VTrace,
+)
+
+
+class PPOLoss(LossModule):
+    """A parent PPO loss class.
+
+    PPO (Proximal Policy Optimization) is a model-free, online RL algorithm
+    that makes use of a recorded (batch of)
+    trajectories to perform several optimization steps, while actively
+    preventing the updated policy to deviate too
+    much from its original parameter configuration.
+
+    PPO loss can be found in different flavors, depending on the way the
+    constrained optimization is implemented: ClipPPOLoss and KLPENPPOLoss.
+    Unlike its subclasses, this class does not implement any regularization
+    and should therefore be used cautiously.
+
+    For more details regarding PPO, refer to: "Proximal Policy Optimization Algorithms",
+    https://arxiv.org/abs/1707.06347
+
+    Args:
+        actor_network (ProbabilisticTensorDictSequential): policy operator.
+            Typically, a :class:`~tensordict.nn.ProbabilisticTensorDictSequential` subclass taking observations
+            as input and outputting an action (or actions) as well as its log-probability value.
+        critic_network (ValueOperator): value operator. The critic will usually take the observations as input
+            and return a scalar value (``state_value`` by default) in the output keys.
+
+    .. note::
+        While this loss module does not enforce any specific model mode (train/eval), it is highly recommended
+        to keep your model in eval mode during RL training to ensure deterministic behavior.
+        A failure to learn due to a train/eval mode mismatch is often observed when the Effective Sample Size (ESS)
+        drops or increases significantly (see note below).
+
+    .. note::
+        The PPO loss exposes a couple of additional metrics that can be used to monitor the training process:
+
+        - The clip fraction is the ratio of the number of clipped weights in the PPO loss (i.e. the ratio of the number of weights that were clipped to the total number of weights).
+        - The Effective Sample Size (ESS) is a measure of the effective number of samples in the batch, computed as the inverse of the sum of the squared importance weights.
+          A value of 1 indicates that the importance weights are all equal to 1 (i.e., the samples are equally weighted).
+          Any value below 1 indicates that the samples are not equally weighted, and the ESS is a measure of the effective number of samples.
+          If the value drops or increases significantly, it often indicates issues with the model configuration (such as a train/eval mode mismatch, or a large policy update).
+
+    Keyword Args:
+        entropy_bonus (bool, optional): if ``True``, an entropy bonus will be added to the
+            loss to favour exploratory policies.
+        samples_mc_entropy (int, optional): if the distribution retrieved from the policy
+            operator does not have a closed form
+            formula for the entropy, a Monte-Carlo estimate will be used.
+            ``samples_mc_entropy`` will control how many
+            samples will be used to compute this estimate.
+            Defaults to ``1``.
+        entropy_coeff (scalar | Mapping[NestedKey, scalar], optional): entropy multiplier when computing the total loss.
+            * **Scalar**: one value applied to the summed entropy of every action head.
+            * **Mapping** ``{head_name: coeff}`` gives an individual coefficient for each action-head's entropy.
+            Defaults to ``0.01``.
+
+            See :ref:`ppo_entropy_coefficients` for detailed usage examples and troubleshooting.
+        log_explained_variance (bool, optional): if ``True``, the explained variance of the critic
+            predictions w.r.t. value targets will be computed and logged as ``"explained_variance"``.
+            This can help monitor critic quality during training. Best possible score is 1.0, lower values are worse. Defaults to ``True``.
+        critic_coeff (scalar, optional): critic loss multiplier when computing the total
+            loss. Defaults to ``1.0``. Set ``critic_coeff`` to ``None`` to exclude the value
+            loss from the forward outputs.
+        loss_critic_type (str, optional): loss function for the value discrepancy.
+            Can be one of "l1", "l2" or "smooth_l1". Defaults to ``"smooth_l1"``.
+        normalize_advantage (bool, optional): if ``True``, the advantage will be normalized
+            before being used. Defaults to ``False``.
+        normalize_advantage_exclude_dims (Tuple[int], optional): dimensions to exclude from the advantage
+            standardization. Negative dimensions are valid. This is useful in multiagent (or multiobjective) settings
+            where the agent (or objective) dimension may be excluded from the reductions. Default: ().
+        separate_losses (bool, optional): if ``True``, shared parameters between
+            policy and critic will only be trained on the policy loss.
+            Defaults to ``False``, i.e., gradients are propagated to shared
+            parameters for both policy and critic losses.
+        advantage_key (str, optional): [Deprecated, use set_keys(advantage_key=advantage_key) instead]
+            The input tensordict key where the advantage is
+            expected to be written. Defaults to ``"advantage"``.
+        value_target_key (str, optional): [Deprecated, use set_keys(value_target_key=value_target_key) instead]
+            The input tensordict key where the target state
+            value is expected to be written. Defaults to ``"value_target"``.
+        value_key (str, optional): [Deprecated, use set_keys(value_key) instead]
+            The input tensordict key where the state
+            value is expected to be written. Defaults to ``"state_value"``.
+        functional (bool, optional): whether modules should be functionalized.
+            Functionalizing permits features like meta-RL, but makes it
+            impossible to use distributed models (DDP, FSDP, ...) and comes
+            with a little cost. Defaults to ``True``.
+        reduction (str, optional): Specifies the reduction to apply to the output:
+            ``"none"`` | ``"mean"`` | ``"sum"``. ``"none"``: no reduction will be applied,
+            ``"mean"``: the sum of the output will be divided by the number of
+            elements in the output, ``"sum"``: the output will be summed. Default: ``"mean"``.
+        clip_value (:obj:`float`, optional): If provided, it will be used to compute a clipped version of the value
+            prediction with respect to the input tensordict value estimate and use it to calculate the value loss.
+            The purpose of clipping is to limit the impact of extreme value predictions, helping stabilize training
+            and preventing large updates. However, it will have no impact if the value estimate was done by the current
+            version of the value estimator. Defaults to ``None``.
+        device (torch.device, optional): device of the buffers. Defaults to ``None``.
+
+            .. note:: Parameters and buffers from the policy / critic will not be cast to that device to ensure that
+                the storages match the ones that are passed to other components, such as data collectors.
+
+    .. note::
+        The advantage (typically GAE) can be computed by the loss function or
+        in the training loop. The latter option is usually preferred, but this is
+        up to the user to choose which option is to be preferred.
+        If the advantage key (``"advantage"`` by default) is not present in the
+        input tensordict, the advantage will be computed by the :meth:`~.forward`
+        method.
+
+        >>> ppo_loss = PPOLoss(actor, critic)
+        >>> advantage = GAE(critic)
+        >>> data = next(datacollector)
+        >>> losses = ppo_loss(data)
+        >>> # equivalent
+        >>> advantage(data)
+        >>> losses = ppo_loss(data)
+
+        A custom advantage module can be built using :meth:`~.make_value_estimator`.
+        The default is :class:`~torchrl.objectives.value.GAE` with hyperparameters
+        dictated by :func:`~torchrl.objectives.utils.default_value_kwargs`.
+
+        >>> ppo_loss = PPOLoss(actor, critic)
+        >>> ppo_loss.make_value_estimator(ValueEstimators.TDLambda)
+        >>> data = next(datacollector)
+        >>> losses = ppo_loss(data)
+
+    .. note::
+        If the actor and the value function share parameters, one can avoid
+        calling the common module multiple times by passing only the head of the
+        value network to the PPO loss module:
+
+        >>> common = SomeModule(in_keys=["observation"], out_keys=["hidden"])
+        >>> actor_head = SomeActor(in_keys=["hidden"])
+        >>> value_head = SomeValue(in_keys=["hidden"])
+        >>> # first option, with 2 calls on the common module
+        >>> model = ActorValueOperator(common, actor_head, value_head)
+        >>> loss_module = PPOLoss(model.get_policy_operator(), model.get_value_operator())
+        >>> # second option, with a single call to the common module
+        >>> loss_module = PPOLoss(ProbabilisticTensorDictSequential(model, actor_head), value_head)
+
+        This will work regardless of whether separate_losses is activated or not.
+
+    Examples:
+        >>> import torch
+        >>> from torch import nn
+        >>> from torchrl.data.tensor_specs import Bounded
+        >>> from torchrl.modules.distributions import NormalParamExtractor, TanhNormal
+        >>> from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
+        >>> from torchrl.modules.tensordict_module.common import SafeModule
+        >>> from torchrl.objectives.ppo import PPOLoss
+        >>> from tensordict import TensorDict
+        >>> n_act, n_obs = 4, 3
+        >>> spec = Bounded(-torch.ones(n_act), torch.ones(n_act), (n_act,))
+        >>> base_layer = nn.Linear(n_obs, 5)
+        >>> net = nn.Sequential(base_layer, nn.Linear(5, 2 * n_act), NormalParamExtractor())
+        >>> module = SafeModule(net, in_keys=["observation"], out_keys=["loc", "scale"])
+        >>> actor = ProbabilisticActor(
+        ...     module=module,
+        ...     distribution_class=TanhNormal,
+        ...     in_keys=["loc", "scale"],
+        ...     spec=spec)
+        >>> module = nn.Sequential(base_layer, nn.Linear(5, 1))
+        >>> value = ValueOperator(
+        ...     module=module,
+        ...     in_keys=["observation"])
+        >>> loss = PPOLoss(actor, value)
+        >>> batch = [2, ]
+        >>> action = spec.rand(batch)
+        >>> data = TensorDict({"observation": torch.randn(*batch, n_obs),
+        ...         "action": action,
+        ...         "action_log_prob": torch.randn_like(action[..., 1]),
+        ...         ("next", "done"): torch.zeros(*batch, 1, dtype=torch.bool),
+        ...         ("next", "terminated"): torch.zeros(*batch, 1, dtype=torch.bool),
+        ...         ("next", "reward"): torch.randn(*batch, 1),
+        ...         ("next", "observation"): torch.randn(*batch, n_obs),
+        ...     }, batch)
+        >>> loss(data)
+        TensorDict(
+            fields={
+                entropy: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                explained_variance: Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.float32, is_shared=False),
+                kl_approx: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                loss_critic: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                loss_entropy: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                loss_objective: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
+            batch_size=torch.Size([]),
+            device=None,
+            is_shared=False)
+
+    This class is compatible with non-tensordict based modules too and can be
+    used without recurring to any tensordict-related primitive. In this case,
+    the expected keyword arguments are:
+    ``["action", "sample_log_prob", "next_reward", "next_done", "next_terminated"]`` + in_keys of the actor and value network.
+    The return value is a tuple of tensors in the following order:
+    ``["loss_objective"]`` + ``["entropy", "loss_entropy"]`` if entropy_bonus is set + ``"loss_critic"`` if critic_coeff is not ``None``.
+    The output keys can also be filtered using :meth:`PPOLoss.select_out_keys` method.
+
+    Examples:
+        >>> import torch
+        >>> from torch import nn
+        >>> from torchrl.data.tensor_specs import Bounded
+        >>> from torchrl.modules.distributions import NormalParamExtractor, TanhNormal
+        >>> from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
+        >>> from torchrl.modules.tensordict_module.common import SafeModule
+        >>> from torchrl.objectives.ppo import PPOLoss
+        >>> n_act, n_obs = 4, 3
+        >>> spec = Bounded(-torch.ones(n_act), torch.ones(n_act), (n_act,))
+        >>> base_layer = nn.Linear(n_obs, 5)
+        >>> net = nn.Sequential(base_layer, nn.Linear(5, 2 * n_act), NormalParamExtractor())
+        >>> module = SafeModule(net, in_keys=["observation"], out_keys=["loc", "scale"])
+        >>> actor = ProbabilisticActor(
+        ...     module=module,
+        ...     distribution_class=TanhNormal,
+        ...     in_keys=["loc", "scale"],
+        ...     spec=spec)
+        >>> module = nn.Sequential(base_layer, nn.Linear(5, 1))
+        >>> value = ValueOperator(
+        ...     module=module,
+        ...     in_keys=["observation"])
+        >>> loss = PPOLoss(actor, value)
+        >>> loss.set_keys(sample_log_prob="sampleLogProb")
+        >>> _ = loss.select_out_keys("loss_objective")
+        >>> batch = [2, ]
+        >>> action = spec.rand(batch)
+        >>> loss_objective = loss(
+        ...     observation=torch.randn(*batch, n_obs),
+        ...     action=action,
+        ...     sampleLogProb=torch.randn_like(action[..., 1]) / 10,
+        ...     next_done=torch.zeros(*batch, 1, dtype=torch.bool),
+        ...     next_terminated=torch.zeros(*batch, 1, dtype=torch.bool),
+        ...     next_reward=torch.randn(*batch, 1),
+        ...     next_observation=torch.randn(*batch, n_obs))
+        >>> loss_objective.backward()
+
+    **Simple Entropy Coefficient Examples**:
+        >>> # Scalar entropy coefficient (default behavior)
+        >>> loss = PPOLoss(actor, critic, entropy_coeff=0.01)
+        >>>
+        >>> # Per-head entropy coefficients (for composite action spaces)
+        >>> entropy_coeff = {
+        ...     ("agent0", "action_log_prob"): 0.01,  # Low exploration
+        ...     ("agent1", "action_log_prob"): 0.05,  # High exploration
+        ... }
+        >>> loss = PPOLoss(actor, critic, entropy_coeff=entropy_coeff)
+
+    .. note::
+        There is an exception regarding compatibility with non-tensordict-based modules.
+        If the actor network is probabilistic and uses a :class:`~tensordict.nn.distributions.CompositeDistribution`,
+        this class must be used with tensordicts and cannot function as a tensordict-independent module.
+        This is because composite action spaces inherently rely on the structured representation of data provided by
+        tensordicts to handle their actions.
+
+    .. _ppo_entropy_coefficients:
+
+    .. note::
+        **Entropy Bonus and Coefficient Management**
+
+        The entropy bonus encourages exploration by adding the negative entropy of the policy to the loss.
+        This can be configured in two ways:
+
+        **Scalar Coefficient (Default)**: Use a single coefficient for all action heads:
+            >>> loss = PPOLoss(actor, critic, entropy_coeff=0.01)
+
+        **Per-Head Coefficients**: Use different coefficients for different action components:
+            >>> # For a robot with movement and gripper actions
+            >>> entropy_coeff = {
+            ...     ("agent0", "action_log_prob"): 0.01,  # Movement: low exploration
+            ...     ("agent1", "action_log_prob"): 0.05,  # Gripper: high exploration
+            ... }
+            >>> loss = PPOLoss(actor, critic, entropy_coeff=entropy_coeff)
+
+        **Key Requirements**: When using per-head coefficients, you must provide the full nested key
+        path to each action head's log probability (e.g., `("agent0", "action_log_prob")`).
+
+        **Monitoring Entropy Loss**:
+
+        When using composite action spaces, the loss output includes:
+        - `"entropy"`: Summed entropy across all action heads (for logging)
+        - `"composite_entropy"`: Individual entropy values for each action head
+        - `"loss_entropy"`: The weighted entropy loss term
+
+        Example output:
+            >>> result = loss(data)
+            >>> print(result["entropy"])  # Total entropy: 2.34
+            >>> print(result["composite_entropy"])  # Per-head: {"movement": 1.2, "gripper": 1.14}
+            >>> print(result["loss_entropy"])  # Weighted loss: -0.0234
+
+        **Common Issues**:
+
+        **KeyError: "Missing entropy coeff for head 'head_name'"**:
+        - Ensure you provide coefficients for ALL action heads
+        - Use full nested keys: `("head_name", "action_log_prob")`
+        - Check that your action space structure matches the coefficient mapping
+
+        **Incorrect Entropy Calculation**:
+        - Call `set_composite_lp_aggregate(False).set()` before creating your policy
+        - Verify that your action space uses :class:`~tensordict.nn.distributions.CompositeDistribution`
+    """
+
+    @dataclass
+    class _AcceptedKeys:
+        """Maintains default values for all configurable tensordict keys.
+
+        This class defines which tensordict keys can be set using '.set_keys(key_name=key_value)' and their
+        default values.
+
+        Attributes:
+            advantage (NestedKey): The input tensordict key where the advantage is expected.
+                Will be used for the underlying value estimator. Defaults to ``"advantage"``.
+            value_target (NestedKey): The input tensordict key where the target state value is expected.
+                Will be used for the underlying value estimator. Defaults to ``"value_target"``.
+            value (NestedKey): The input tensordict key where the state value is expected.
+                Will be used for the underlying value estimator. Defaults to ``"state_value"``.
+            sample_log_prob (NestedKey or list of nested keys): The input tensordict key where the
+                sample log probability is expected.
+                Defaults to ``"sample_log_prob"`` when :func:`~tensordict.nn.composite_lp_aggregate` returns `True`,
+                `"action_log_prob"` otherwise.
+            action (NestedKey or list of nested keys): The input tensordict key where the action is expected.
+                Defaults to ``"action"``.
+            reward (NestedKey or list of nested keys): The input tensordict key where the reward is expected.
+                Will be used for the underlying value estimator. Defaults to ``"reward"``.
+            done (NestedKey or list of nested keys): The key in the input TensorDict that indicates
+                whether a trajectory is done. Will be used for the underlying value estimator.
+                Defaults to ``"done"``.
+            terminated (NestedKey or list of nested keys): The key in the input TensorDict that indicates
+                whether a trajectory is terminated. Will be used for the underlying value estimator.
+                Defaults to ``"terminated"``.
+        """
+
+        advantage: NestedKey = "advantage"
+        value_target: NestedKey = "value_target"
+        value: NestedKey = "state_value"
+        sample_log_prob: NestedKey | list[NestedKey] | None = None
+        action: NestedKey | list[NestedKey] = "action"
+        reward: NestedKey | list[NestedKey] = "reward"
+        done: NestedKey | list[NestedKey] = "done"
+        terminated: NestedKey | list[NestedKey] = "terminated"
+
+        def __post_init__(self):
+            if self.sample_log_prob is None:
+                if composite_lp_aggregate(nowarn=True):
+                    self.sample_log_prob = "sample_log_prob"
+                else:
+                    self.sample_log_prob = "action_log_prob"
+
+    default_keys = _AcceptedKeys
+    tensor_keys: _AcceptedKeys
+    default_value_estimator = ValueEstimators.GAE
+
+    actor_network: ProbabilisticTensorDictModule
+    critic_network: TensorDictModule
+    actor_network_params: TensorDictParams
+    critic_network_params: TensorDictParams
+    target_actor_network_params: TensorDictParams
+    target_critic_network_params: TensorDictParams
+
+    def __init__(
+        self,
+        actor_network: ProbabilisticTensorDictSequential | None = None,
+        critic_network: TensorDictModule | None = None,
+        *,
+        entropy_bonus: bool = True,
+        samples_mc_entropy: int = 1,
+        entropy_coeff: float | Mapping[NestedKey, float] | None = None,
+        log_explained_variance: bool = True,
+        critic_coeff: float | None = None,
+        loss_critic_type: str = "smooth_l1",
+        normalize_advantage: bool = False,
+        normalize_advantage_exclude_dims: tuple[int] = (),
+        gamma: float | None = None,
+        separate_losses: bool = False,
+        advantage_key: str | None = None,
+        value_target_key: str | None = None,
+        value_key: str | None = None,
+        functional: bool = True,
+        actor: ProbabilisticTensorDictSequential = None,
+        critic: ProbabilisticTensorDictSequential = None,
+        reduction: str | None = None,
+        clip_value: float | None = None,
+        device: torch.device | None = None,
+        **kwargs,
+    ):
+        if actor is not None:
+            actor_network = actor
+            del actor
+        if critic is not None:
+            critic_network = critic
+            del critic
+
+        # critic_coef has been removed in v0.11
+        if "critic_coef" in kwargs:
+            raise TypeError(
+                "'critic_coef' has been removed in torchrl v0.11. Please use 'critic_coeff' instead."
+            )
+
+        if critic_coeff is None and critic_network is not None:
+            critic_coeff = 1.0
+        elif critic_coeff in (None, 0) and critic_network is not None:
+            critic_coeff = None
+
+        if actor_network is None or (
+            critic_network is None and critic_coeff not in (None, 0.0)
+        ):
+            raise TypeError(
+                "Missing positional arguments actor_network or critic_network."
+            )
+        if reduction is None:
+            reduction = "mean"
+
+        self._functional = functional
+        self._in_keys = None
+        self._out_keys = None
+        super().__init__()
+        if functional:
+            self.convert_to_functional(actor_network, "actor_network")
+        else:
+            self.actor_network = actor_network
+            self.actor_network_params = None
+            self.target_actor_network_params = None
+
+        if separate_losses:
+            # we want to make sure there are no duplicates in the params: the
+            # params of critic must be refs to actor if they're shared
+            policy_params = list(actor_network.parameters())
+        else:
+            policy_params = None
+        if functional and critic_network is not None:
+            self.convert_to_functional(
+                critic_network, "critic_network", compare_against=policy_params
+            )
+        else:
+            self.critic_network = critic_network
+            self.critic_network_params = None
+            self.target_critic_network_params = None
+
+        self.log_explained_variance = log_explained_variance
+        self.samples_mc_entropy = samples_mc_entropy
+        self.entropy_bonus = entropy_bonus
+        self.separate_losses = separate_losses
+        self.reduction = reduction
+
+        if device is None:
+            try:
+                device = next(self.parameters()).device
+            except (AttributeError, StopIteration):
+                device = getattr(
+                    torch, "get_default_device", lambda: torch.device("cpu")
+                )()
+
+        # entropy_coef has been removed in v0.11
+        if "entropy_coef" in kwargs:
+            raise TypeError(
+                "'entropy_coef' has been removed in torchrl v0.11. Please use 'entropy_coeff' instead."
+            )
+
+        # Set default value if None
+        if entropy_coeff is None:
+            entropy_coeff = 0.01
+
+        if isinstance(entropy_coeff, Mapping):
+            # Store the mapping for per-head coefficients
+            self._entropy_coeff_map = {k: float(v) for k, v in entropy_coeff.items()}
+            # Register an empty buffer for compatibility
+            self.register_buffer("entropy_coeff", torch.tensor(0.0))
+        elif isinstance(entropy_coeff, (float, int, torch.Tensor)):
+            # Register the scalar entropy coefficient
+            coeff = (
+                float(entropy_coeff)
+                if not torch.is_tensor(entropy_coeff)
+                else float(entropy_coeff.item())
+            )
+            self.register_buffer("entropy_coeff", torch.tensor(coeff))
+            self._entropy_coeff_map = None
+        else:
+            raise TypeError("entropy_coeff must be a float or a Mapping[str, float]")
+        if critic_coeff is not None:
+            self.register_buffer(
+                "critic_coeff", torch.tensor(critic_coeff, device=device)
+            )
+        else:
+            self.critic_coeff = None
+        self._has_critic = bool(self.critic_coeff is not None and self.critic_coeff > 0)
+        self.loss_critic_type = loss_critic_type
+        self.normalize_advantage = normalize_advantage
+        self.normalize_advantage_exclude_dims = normalize_advantage_exclude_dims
+
+        if gamma is not None:
+            raise TypeError(_GAMMA_LMBDA_DEPREC_ERROR)
+        self._set_deprecated_ctor_keys(
+            advantage=advantage_key,
+            value_target=value_target_key,
+            value=value_key,
+        )
+
+        if clip_value is not None:
+            if isinstance(clip_value, float):
+                clip_value = torch.tensor(clip_value, device=device)
+            elif isinstance(clip_value, torch.Tensor):
+                if clip_value.numel() != 1:
+                    raise ValueError(
+                        f"clip_value must be a float or a scalar tensor, got {clip_value}."
+                    )
+            else:
+                raise ValueError(
+                    f"clip_value must be a float or a scalar tensor, got {clip_value}."
+                )
+            self.register_buffer("clip_value", clip_value.to(device))
+        else:
+            self.clip_value = None
+        try:
+            log_prob_keys = self.actor_network.log_prob_keys
+            action_keys = self.actor_network.dist_sample_keys
+            if len(log_prob_keys) > 1:
+                self.set_keys(sample_log_prob=log_prob_keys, action=action_keys)
+            else:
+                self.set_keys(sample_log_prob=log_prob_keys[0], action=action_keys[0])
+        except AttributeError:
+            pass
+
+    @property
+    def functional(self):
+        return self._functional
+
+    def _set_in_keys(self):
+        keys = []
+        _maybe_add_or_extend_key(keys, self.actor_network.in_keys)
+        _maybe_add_or_extend_key(keys, self.actor_network.in_keys, "next")
+        if self.critic_network is not None:
+            _maybe_add_or_extend_key(keys, self.critic_network.in_keys)
+        _maybe_add_or_extend_key(keys, self.tensor_keys.action)
+        _maybe_add_or_extend_key(keys, self.tensor_keys.sample_log_prob)
+        _maybe_add_or_extend_key(keys, self.tensor_keys.reward, "next")
+        _maybe_add_or_extend_key(keys, self.tensor_keys.done, "next")
+        _maybe_add_or_extend_key(keys, self.tensor_keys.terminated, "next")
+
+        self._in_keys = list(set(keys))
+
+    @property
+    def in_keys(self):
+        if self._in_keys is None:
+            self._set_in_keys()
+        return self._in_keys
+
+    @in_keys.setter
+    def in_keys(self, values):
+        self._in_keys = values
+
+    @property
+    def out_keys(self):
+        if self._out_keys is None:
+            keys = ["loss_objective"]
+            if self.entropy_bonus:
+                keys.extend(["entropy", "loss_entropy"])
+            if self.loss_critic:
+                keys.append("loss_critic")
+            if self.clip_value:
+                keys.append("value_clip_fraction")
+            self._out_keys = keys
+        return self._out_keys
+
+    @out_keys.setter
+    def out_keys(self, values):
+        self._out_keys = values
+
+    def _forward_value_estimator_keys(self, **kwargs) -> None:
+        if hasattr(self, "_value_estimator") and self._value_estimator is not None:
+            self._value_estimator.set_keys(
+                advantage=self.tensor_keys.advantage,
+                value_target=self.tensor_keys.value_target,
+                value=self.tensor_keys.value,
+                reward=self.tensor_keys.reward,
+                done=self.tensor_keys.done,
+                terminated=self.tensor_keys.terminated,
+                sample_log_prob=self.tensor_keys.sample_log_prob,
+            )
+        self._set_in_keys()
+
+    def reset(self) -> None:
+        pass
+
+    def _get_entropy(
+        self, dist: d.Distribution, adv_shape: torch.Size
+    ) -> torch.Tensor | TensorDict:
+        try:
+            entropy = dist.entropy()
+            if not entropy.isfinite().all():
+                del entropy
+                if VERBOSE:
+                    torchrl_logger.info(
+                        "Entropy is not finite. Using Monte Carlo sampling."
+                    )
+                raise NotImplementedError
+        except NotImplementedError:
+            if VERBOSE:
+                torchrl_logger.warning(
+                    f"Entropy not implemented for {type(dist)} or is not finite. Using Monte Carlo sampling."
+                )
+            if getattr(dist, "has_rsample", False):
+                x = dist.rsample((self.samples_mc_entropy,))
+            else:
+                x = dist.sample((self.samples_mc_entropy,))
+            with (
+                set_composite_lp_aggregate(False)
+                if isinstance(dist, CompositeDistribution)
+                else contextlib.nullcontext()
+            ):
+                log_prob = dist.log_prob(x)
+                if is_tensor_collection(log_prob):
+                    if isinstance(self.tensor_keys.sample_log_prob, NestedKey):
+                        log_prob = log_prob.get(self.tensor_keys.sample_log_prob)
+                    else:
+                        log_prob = log_prob.select(*self.tensor_keys.sample_log_prob)
+
+            entropy = -log_prob.mean(0)
+        if is_tensor_collection(entropy) and entropy.batch_size != adv_shape:
+            entropy.batch_size = adv_shape
+        return entropy.unsqueeze(-1)
+
+    def _get_cur_log_prob(self, tensordict):
+        if isinstance(
+            self.actor_network,
+            (ProbabilisticTensorDictSequential, ProbabilisticTensorDictModule),
+        ) or hasattr(self.actor_network, "get_dist"):
+            # assert tensordict['log_probs'].requires_grad
+            # assert tensordict['logits'].requires_grad
+            with (
+                self.actor_network_params.to_module(self.actor_network)
+                if self.functional
+                else contextlib.nullcontext()
+            ):
+                dist = self.actor_network.get_dist(tensordict)
+            is_composite = isinstance(dist, CompositeDistribution)
+
+            if is_composite:
+                action = tensordict.select(
+                    *(
+                        (self.tensor_keys.action,)
+                        if isinstance(self.tensor_keys.action, NestedKey)
+                        else self.tensor_keys.action
+                    )
+                )
+            else:
+                action = _maybe_get_or_select(tensordict, self.tensor_keys.action)
+
+            if action.requires_grad:
+                raise RuntimeError(
+                    f"tensordict stored {self.tensor_keys.action} requires grad."
+                )
+            log_prob = dist.log_prob(action)
+        else:
+            raise NotImplementedError(
+                "Only probabilistic modules from tensordict.nn are currently supported. "
+                "If you need to implement a custom logic to retrieve the log-probs (to compute "
+                "the PPO objective) or the distribution (for the PPO entropy), please augment "
+                f"the {type(self).__class__} by implementing your own logic in _get_cur_log_prob."
+            )
+            # with self.actor_network_params.to_module(
+            #     self.actor_network
+            # ) if self.functional else contextlib.nullcontext():
+            #     td = self.actor_network(tensordict)
+            #     log_prob = td.get(self.tensor_keys.sample_log_prob)
+            #     dist = torch.distributions.Categorical(td.get("logits"))
+            #     is_composite = False
+        return log_prob, dist, is_composite
+
+    def _log_weight(
+        self, tensordict: TensorDictBase, adv_shape: torch.Size
+    ) -> tuple[torch.Tensor, d.Distribution, torch.Tensor]:
+        prev_log_prob = _maybe_get_or_select(
+            tensordict,
+            self.tensor_keys.sample_log_prob,
+            adv_shape,
+        )
+        if prev_log_prob is None:
+            raise KeyError(
+                f"Couldn't find the log-prob {self.tensor_keys.sample_log_prob} in the input data."
+            )
+        if prev_log_prob.requires_grad:
+            raise RuntimeError(
+                f"tensordict stored {self.tensor_keys.sample_log_prob} requires grad."
+            )
+
+        log_prob, dist, is_composite = self._get_cur_log_prob(tensordict)
+
+        if is_composite:
+            with set_composite_lp_aggregate(False):
+                if not is_tensor_collection(prev_log_prob):
+                    # this isn't great: in general, multi-head actions should have a composite log-prob too
+                    warnings.warn(
+                        "You are using a composite distribution, yet your log-probability is a tensor. "
+                        "Make sure you have called tensordict.nn.set_composite_lp_aggregate(False).set() at "
+                        "the beginning of your script to get a proper composite log-prob.",
+                        category=UserWarning,
+                    )
+
+                    if is_tensor_collection(log_prob):
+                        log_prob = _sum_td_features(log_prob)
+                        log_prob.view_as(prev_log_prob)
+                if log_prob.batch_size != adv_shape:
+                    log_prob.batch_size = adv_shape
+        log_weight = (log_prob - prev_log_prob).unsqueeze(-1)
+        if is_tensor_collection(log_weight):
+            log_weight = _sum_td_features(log_weight)
+            log_weight = log_weight.view(adv_shape).unsqueeze(-1)
+
+        kl_approx = (prev_log_prob - log_prob).unsqueeze(-1)
+        if is_tensor_collection(kl_approx):
+            kl_approx = _sum_td_features(kl_approx)
+
+        return log_weight, dist, kl_approx
+
764
+ def loss_critic(
765
+ self, tensordict: TensorDictBase
766
+ ) -> tuple[torch.Tensor | TensorDict, ...]:
767
+ """Returns the critic loss multiplied by ``critic_coeff``, if it is not ``None``."""
768
+ # TODO: if the advantage is gathered by forward, this introduces an
769
+ # overhead that we could easily reduce.
770
+ if self.separate_losses:
771
+ tensordict = tensordict.detach()
772
+ target_return = tensordict.get(
773
+ self.tensor_keys.value_target, None
774
+ ) # TODO: None soon to be removed
775
+ if target_return is None:
776
+ raise KeyError(
777
+ f"the key {self.tensor_keys.value_target} was not found in the input tensordict. "
778
+ f"Make sure you provided the right key and the value_target (i.e. the target "
779
+ f"return) has been retrieved accordingly. Advantage classes such as GAE, "
780
+ f"TDLambdaEstimate and TDEstimate all return a 'value_target' entry that "
781
+ f"can be used for the value loss."
782
+ )
783
+
784
+ if self.clip_value:
785
+ old_state_value = tensordict.get(self.tensor_keys.value)
786
+ if old_state_value is None:
787
+ raise KeyError(
788
+ f"clip_value is set to {self.clip_value}, but "
789
+ f"the key {self.tensor_keys.value} was not found in the input tensordict. "
790
+ f"Make sure that the 'value_key' passed to PPO exists in the input tensordict."
791
+ )
792
+
793
+ with (
794
+ self.critic_network_params.to_module(self.critic_network)
795
+ if self.functional
796
+ else contextlib.nullcontext()
797
+ ):
798
+ state_value_td = self.critic_network(tensordict)
799
+
800
+ state_value = state_value_td.get(self.tensor_keys.value)
801
+ if state_value is None:
802
+ raise KeyError(
803
+ f"the key {self.tensor_keys.value} was not found in the critic output tensordict. "
804
+ f"Make sure that the 'value_key' passed to PPO is accurate."
805
+ )
806
+
807
+ loss_value = distance_loss(
808
+ target_return,
809
+ state_value,
810
+ loss_function=self.loss_critic_type,
811
+ )
812
+
813
+ clip_fraction = None
814
+ if self.clip_value:
815
+ loss_value, clip_fraction = _clip_value_loss(
816
+ old_state_value,
817
+ state_value,
818
+ self.clip_value,
819
+ target_return,
820
+ loss_value,
821
+ self.loss_critic_type,
822
+ )
823
+
824
+ explained_variance = None
825
+ if self.log_explained_variance:
826
+ with torch.no_grad(): # <-- break grad flow
827
+ tgt = target_return.detach()
828
+ pred = state_value.detach()
829
+ eps = torch.finfo(tgt.dtype).eps
830
+
831
+ resid = torch.var(tgt - pred, correction=0, dim=0)
832
+ total = torch.var(tgt, correction=0, dim=0)
833
+ explained_variance = 1.0 - resid / (total + eps)
834
+
835
+ self._clear_weakrefs(
836
+ tensordict,
837
+ "actor_network_params",
838
+ "critic_network_params",
839
+ "target_actor_network_params",
840
+ "target_critic_network_params",
841
+ )
842
+ if self._has_critic:
843
+ return self.critic_coeff * loss_value, clip_fraction, explained_variance
844
+ return loss_value, clip_fraction, explained_variance
845
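For illustration, the explained-variance diagnostic computed above, reproduced on dummy data (plain torch, values are placeholders); a value close to 1 means the critic tracks the value targets well, while values near or below 0 mean it is uninformative:

    import torch

    target_return = 2.0 * torch.randn(64, 1)                      # e.g. value targets from GAE
    state_value = target_return + 0.1 * torch.randn(64, 1)        # critic predictions with a small error

    eps = torch.finfo(target_return.dtype).eps
    resid = torch.var(target_return - state_value, correction=0, dim=0)
    total = torch.var(target_return, correction=0, dim=0)
    explained_variance = 1.0 - resid / (total + eps)              # close to 1 for this synthetic critic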
+
846
+ @property
847
+ @_cache_values
848
+ def _cached_critic_network_params_detached(self):
849
+ if not self.functional:
850
+ return None
851
+ return self.critic_network_params.detach()
852
+
853
+ @dispatch
854
+ def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
855
+ tensordict = tensordict.clone(False)
856
+ advantage = tensordict.get(self.tensor_keys.advantage, None)
857
+ if advantage is None:
858
+ self.value_estimator(
859
+ tensordict,
860
+ params=self._cached_critic_network_params_detached,
861
+ target_params=self.target_critic_network_params,
862
+ )
863
+ advantage = tensordict.get(self.tensor_keys.advantage)
864
+ if self.normalize_advantage and advantage.numel() > 1:
865
+ if advantage.numel() > tensordict.batch_size.numel() and not len(
866
+ self.normalize_advantage_exclude_dims
867
+ ):
868
+ warnings.warn(
869
+ "You requested advantage normalization and the advantage key has more dimensions"
870
+ " than the tensordict batch. Make sure to pass `normalize_advantage_exclude_dims` "
871
+ "if you want to keep any dimension independent while computing normalization statistics. "
872
+ "If you are working in multi-agent/multi-objective settings this is highly suggested."
873
+ )
874
+ advantage = _standardize(advantage, self.normalize_advantage_exclude_dims)
875
+
876
+ log_weight, dist, kl_approx = self._log_weight(
877
+ tensordict, adv_shape=advantage.shape[:-1]
878
+ )
879
+ neg_loss = log_weight.exp() * advantage
880
+ td_out = TensorDict({"loss_objective": -neg_loss})
881
+ td_out.set("kl_approx", kl_approx.detach().mean()) # for logging
882
+ if self.entropy_bonus:
883
+ entropy = self._get_entropy(dist, adv_shape=advantage.shape[:-1])
884
+ if is_tensor_collection(entropy):
885
+ # Reports the entropy of each action head.
886
+ td_out.set("composite_entropy", entropy.detach())
887
+ td_out.set(
888
+ "entropy", _sum_td_features(entropy).detach().mean()
889
+ ) # for logging
890
+ else:
891
+ td_out.set("entropy", entropy.detach().mean()) # for logging
892
+ td_out.set("loss_entropy", self._weighted_loss_entropy(entropy))
893
+ if self._has_critic:
894
+ loss_critic, value_clip_fraction, explained_variance = self.loss_critic(
895
+ tensordict
896
+ )
897
+ td_out.set("loss_critic", loss_critic)
898
+ if value_clip_fraction is not None:
899
+ td_out.set("value_clip_fraction", value_clip_fraction)
900
+ if explained_variance is not None:
901
+ td_out.set("explained_variance", explained_variance)
902
+ td_out = td_out.named_apply(
903
+ lambda name, value: _reduce(value, reduction=self.reduction).squeeze(-1)
904
+ if name.startswith("loss_")
905
+ else value,
906
+ )
907
+ self._clear_weakrefs(
908
+ tensordict,
909
+ td_out,
910
+ "actor_network_params",
911
+ "critic_network_params",
912
+ "target_actor_network_params",
913
+ "target_critic_network_params",
914
+ )
915
+ return td_out
916
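For illustration, what excluding a dimension from the advantage-normalization statistics means, written with plain torch rather than the internal ``_standardize`` helper (shapes below are placeholders for a multi-agent batch):

    import torch

    adv = torch.randn(128, 3, 1)                      # [batch, n_agents, 1]
    mean = adv.mean(dim=(0, 2), keepdim=True)         # statistics over the batch only...
    std = adv.std(dim=(0, 2), keepdim=True).clamp_min(1e-6)
    adv_norm = (adv - mean) / std                     # ...so each agent keeps its own advantage scale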
+
917
+ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams):
918
+ if value_type is None:
919
+ value_type = self.default_value_estimator
920
+
921
+ # Handle ValueEstimatorBase instance or class
922
+ if isinstance(value_type, ValueEstimatorBase) or (
923
+ isinstance(value_type, type) and issubclass(value_type, ValueEstimatorBase)
924
+ ):
925
+ return LossModule.make_value_estimator(self, value_type, **hyperparams)
926
+
927
+ self.value_type = value_type
928
+ hp = dict(default_value_kwargs(value_type))
929
+ if hasattr(self, "gamma"):
930
+ hp["gamma"] = self.gamma
931
+ hp.update(hyperparams)
932
+ if value_type == ValueEstimators.TD1:
933
+ self._value_estimator = TD1Estimator(
934
+ value_network=self.critic_network, **hp
935
+ )
936
+ elif value_type == ValueEstimators.TD0:
937
+ self._value_estimator = TD0Estimator(
938
+ value_network=self.critic_network, **hp
939
+ )
940
+ elif value_type == ValueEstimators.GAE:
941
+ self._value_estimator = GAE(value_network=self.critic_network, **hp)
942
+ elif value_type == ValueEstimators.TDLambda:
943
+ self._value_estimator = TDLambdaEstimator(
944
+ value_network=self.critic_network, **hp
945
+ )
946
+ elif value_type == ValueEstimators.VTrace:
947
+ # VTrace currently does not support functional call on the actor
948
+ if self.functional:
949
+ actor_with_params = deepcopy(self.actor_network)
950
+ self.actor_network_params.to_module(actor_with_params)
951
+ else:
952
+ actor_with_params = self.actor_network
953
+ self._value_estimator = VTrace(
954
+ value_network=self.critic_network, actor_network=actor_with_params, **hp
955
+ )
956
+ else:
957
+ raise NotImplementedError(f"Unknown value type {value_type}")
958
+
959
+ tensor_keys = {
960
+ "advantage": self.tensor_keys.advantage,
961
+ "value": self.tensor_keys.value,
962
+ "value_target": self.tensor_keys.value_target,
963
+ "reward": self.tensor_keys.reward,
964
+ "done": self.tensor_keys.done,
965
+ "terminated": self.tensor_keys.terminated,
966
+ "sample_log_prob": self.tensor_keys.sample_log_prob,
967
+ }
968
+ self._value_estimator.set_keys(**tensor_keys)
969
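For illustration, a typical call to the method above, assuming a loss module named ``ppo_loss`` built as in the docstring examples; extra keyword arguments override the defaults returned by ``default_value_kwargs``:

    from torchrl.objectives import ValueEstimators

    # ppo_loss = ClipPPOLoss(actor, critic)           # as in the docstring examples
    ppo_loss.make_value_estimator(ValueEstimators.GAE, gamma=0.99, lmbda=0.95)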
+
970
+ def _weighted_loss_entropy(
971
+ self, entropy: torch.Tensor | TensorDictBase
972
+ ) -> torch.Tensor:
973
+ """Compute the weighted entropy loss.
974
+
975
+ If `self._entropy_coeff_map` is provided, apply per-head entropy coefficients.
976
+ Otherwise, use the scalar `self.entropy_coeff`.
977
+ The keys of `self._entropy_coeff_map` must be the full nested keys of the corresponding entropy heads.
978
+ """
979
+ # Mode 1: Use scalar entropy coefficient (default behavior)
980
+ if self._entropy_coeff_map is None:
981
+ # If entropy is a TensorDict (composite action space), sum all entropy values
982
+ if is_tensor_collection(entropy):
983
+ entropy = _sum_td_features(entropy)
984
+ # Apply scalar coefficient: loss = -coeff * entropy (negative for maximization)
985
+ return -self.entropy_coeff * entropy
986
+
987
+ # Mode 2: Use per-head entropy coefficients (for composite action spaces)
988
+ loss_term = None # Initialize running sum over action heads
989
+ coeff = 0 # Placeholder for coefficient value
990
+ # Iterate through all entropy heads in the composite action space
991
+ for head_name, entropy_head in entropy.items(
992
+ include_nested=True, leaves_only=True
993
+ ):
994
+ try:
995
+ # Look up the coefficient for this specific action head
996
+ coeff = self._entropy_coeff_map[head_name]
997
+ except KeyError as exc:
998
+ # Provide clear error message if coefficient mapping is incomplete
999
+ raise KeyError(f"Missing entropy coeff for head '{head_name}'") from exc
1000
+ # Convert coefficient to tensor with matching dtype and device
1001
+ coeff_t = torch.as_tensor(
1002
+ coeff, dtype=entropy_head.dtype, device=entropy_head.device
1003
+ )
1004
+ # Compute weighted loss for this head: -coeff * entropy
1005
+ head_loss_term = -coeff_t * entropy_head
1006
+ # Accumulate loss terms across all heads
1007
+ loss_term = (
1008
+ head_loss_term if loss_term is None else loss_term + head_loss_term
1009
+ )
1010
+
1011
+ return loss_term
1012
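For illustration, a per-head coefficient mapping as consumed by the method above; the head names are placeholders and must match the full nested keys under which each head's entropy is reported:

    entropy_coeff = {
        ("agents", "move"): 0.01,         # stronger exploration bonus for this head (hypothetical key)
        ("agents", "gripper"): 0.001,     # weaker bonus for this head (hypothetical key)
    }
    # loss_module = ClipPPOLoss(actor, critic, entropy_coeff=entropy_coeff)   # actor/critic built elsewhere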
+
1013
+
1014
+ class ClipPPOLoss(PPOLoss):
1015
+ """Clipped PPO loss.
1016
+
1017
+ The clipped importance weighted loss is computed as follows:
1018
+ loss = -min(weight * advantage, clip(weight, 1-eps, 1+eps) * advantage)
1019
+
1020
+ Args:
1021
+ actor_network (ProbabilisticTensorDictSequential): policy operator.
1022
+ critic_network (ValueOperator): value operator.
1023
+
1024
+ .. note::
1025
+ While this loss module does not enforce any specific model mode (train/eval), it is highly recommended
1026
+ to keep your model in eval mode during RL training to ensure deterministic behavior.
1027
+ A failure to learn due to a train/eval mode mismatch is often observed when the Effective Sample Size (ESS)
1028
+ drops or increases significantly (see note below).
1029
+
1030
+ .. note::
1031
+ The PPO loss exposes a couple of additional metrics that can be used to monitor the training process:
1032
+
1033
+ - The clip fraction is the fraction of importance weights that were clipped in the PPO loss (i.e. the number of clipped weights divided by the total number of weights).
1034
+ - The Effective Sample Size (ESS) is a measure of the effective number of samples in the batch, computed as the inverse of the sum of the squared normalized importance weights (and reported here divided by the batch size).
1035
+ A value of 1 indicates that the importance weights are all equal (i.e., the samples are equally weighted).
1036
+ Any value significantly below 1 indicates that a few samples dominate the batch.
1037
+ If the value drops or increases significantly, it often indicates issues with the model configuration (such as a train/eval mode mismatch, or a large policy update).
1038
+
1039
+ Keyword Args:
1040
+ clip_epsilon (scalar, optional): weight clipping threshold in the clipped PPO loss equation.
1041
+ default: 0.2
1042
+ entropy_bonus (bool, optional): if ``True``, an entropy bonus will be added to the
1043
+ loss to favour exploratory policies.
1044
+ samples_mc_entropy (int, optional): if the distribution retrieved from the policy
1045
+ operator does not have a closed form
1046
+ formula for the entropy, a Monte-Carlo estimate will be used.
1047
+ ``samples_mc_entropy`` will control how many
1048
+ samples will be used to compute this estimate.
1049
+ Defaults to ``1``.
1050
+ entropy_coeff (scalar | Mapping[NestedKey, scalar], optional): entropy multiplier when computing the total loss.
1051
+ * **Scalar**: one value applied to the summed entropy of every action head.
1052
+ * **Mapping** ``{head_name: coeff}`` gives an individual coefficient for each action-head's entropy.
1053
+ Defaults to ``0.01``.
1054
+
1055
+ See :ref:`ppo_entropy_coefficients` for detailed usage examples and troubleshooting.
1056
+ critic_coeff (scalar, optional): critic loss multiplier when computing the total
1057
+ loss. Defaults to ``1.0``. Set ``critic_coeff`` to ``None`` to exclude the value
1058
+ loss from the forward outputs.
1059
+ loss_critic_type (str, optional): loss function for the value discrepancy.
1060
+ Can be one of "l1", "l2" or "smooth_l1". Defaults to ``"smooth_l1"``.
1061
+ normalize_advantage (bool, optional): if ``True``, the advantage will be normalized
1062
+ before being used. Defaults to ``False``.
1063
+ normalize_advantage_exclude_dims (Tuple[int], optional): dimensions to exclude from the advantage
1064
+ standardization. Negative dimensions are valid. This is useful in multiagent (or multiobjective) settings
1065
+ where the agent (or objective) dimension may be excluded from the reductions. Default: ().
1066
+ separate_losses (bool, optional): if ``True``, shared parameters between
1067
+ policy and critic will only be trained on the policy loss.
1068
+ Defaults to ``False``, i.e., gradients are propagated to shared
1069
+ parameters for both policy and critic losses.
1070
+ advantage_key (str, optional): [Deprecated, use set_keys(advantage_key=advantage_key) instead]
1071
+ The input tensordict key where the advantage is
1072
+ expected to be written. Defaults to ``"advantage"``.
1073
+ value_target_key (str, optional): [Deprecated, use set_keys(value_target_key=value_target_key) instead]
1074
+ The input tensordict key where the target state
1075
+ value is expected to be written. Defaults to ``"value_target"``.
1076
+ value_key (str, optional): [Deprecated, use set_keys(value_key) instead]
1077
+ The input tensordict key where the state
1078
+ value is expected to be written. Defaults to ``"state_value"``.
1079
+ functional (bool, optional): whether modules should be functionalized.
1080
+ Functionalizing permits features like meta-RL, but makes it
1081
+ impossible to use distributed models (DDP, FSDP, ...) and comes
1082
+ with a small runtime cost. Defaults to ``True``.
1083
+ reduction (str, optional): Specifies the reduction to apply to the output:
1084
+ ``"none"`` | ``"mean"`` | ``"sum"``. ``"none"``: no reduction will be applied,
1085
+ ``"mean"``: the sum of the output will be divided by the number of
1086
+ elements in the output, ``"sum"``: the output will be summed. Default: ``"mean"``.
1087
+ clip_value (bool or float, optional): If a ``float`` is provided, it will be used to compute a clipped
1088
+ version of the value prediction with respect to the input tensordict value estimate and use it to
1089
+ calculate the value loss. The purpose of clipping is to limit the impact of extreme value predictions,
1090
+ helping stabilize training and preventing large updates. However, it will have no impact if the value
1091
+ estimate was done by the current version of the value estimator. If instead ``True`` is provided, the
1092
+ ``clip_epsilon`` parameter will be used as the clipping threshold. If not provided or ``False``, no
1093
+ clipping will be performed. Defaults to ``False``.
1094
+ device (torch.device, optional): device of the buffers. Defaults to ``None``.
1095
+
1096
+ .. note:: Parameters and buffers from the policy / critic will not be cast to that device to ensure that
1097
+ the storages match the ones that are passed to other components, such as data collectors.
1098
+
1099
+ .. note::
1100
+ The advantage (typically GAE) can be computed by the loss function or
1101
+ in the training loop. The latter option is usually preferred, but the
1102
+ choice is ultimately left to the user.
1103
+ If the advantage key (``"advantage"`` by default) is not present in the
1104
+ input tensordict, the advantage will be computed by the :meth:`~.forward`
1105
+ method.
1106
+
1107
+ >>> ppo_loss = ClipPPOLoss(actor, critic)
1108
+ >>> advantage = GAE(critic)
1109
+ >>> data = next(datacollector)
1110
+ >>> losses = ppo_loss(data)
1111
+ >>> # equivalent
1112
+ >>> advantage(data)
1113
+ >>> losses = ppo_loss(data)
1114
+
1115
+ A custom advantage module can be built using :meth:`~.make_value_estimator`.
1116
+ The default is :class:`~torchrl.objectives.value.GAE` with hyperparameters
1117
+ dictated by :func:`~torchrl.objectives.utils.default_value_kwargs`.
1118
+
1119
+ >>> ppo_loss = ClipPPOLoss(actor, critic)
1120
+ >>> ppo_loss.make_value_estimator(ValueEstimators.TDLambda)
1121
+ >>> data = next(datacollector)
1122
+ >>> losses = ppo_loss(data)
1123
+
1124
+ .. note::
1125
+ If the actor and the value function share parameters, one can avoid
1126
+ calling the common module multiple times by passing only the head of the
1127
+ value network to the PPO loss module:
1128
+
1129
+ >>> common = SomeModule(in_keys=["observation"], out_keys=["hidden"])
1130
+ >>> actor_head = SomeActor(in_keys=["hidden"])
1131
+ >>> value_head = SomeValue(in_keys=["hidden"])
1132
+ >>> # first option, with 2 calls on the common module
1133
+ >>> model = ActorValueOperator(common, actor_head, value_head)
1134
+ >>> loss_module = ClipPPOLoss(model.get_policy_operator(), model.get_value_operator())
1135
+ >>> # second option, with a single call to the common module
1136
+ >>> loss_module = ClipPPOLoss(ProbabilisticTensorDictSequential(model, actor_head), value_head)
1137
+
1138
+ This will work regardless of whether separate_losses is activated or not.
1139
+
1140
+ """
1141
+
1142
+ actor_network: TensorDictModule
1143
+ critic_network: TensorDictModule
1144
+ actor_network_params: TensorDictParams
1145
+ critic_network_params: TensorDictParams
1146
+ target_actor_network_params: TensorDictParams
1147
+ target_critic_network_params: TensorDictParams
1148
+
1149
+ def __init__(
1150
+ self,
1151
+ actor_network: ProbabilisticTensorDictSequential | None = None,
1152
+ critic_network: TensorDictModule | None = None,
1153
+ *,
1154
+ clip_epsilon: float = 0.2,
1155
+ entropy_bonus: bool = True,
1156
+ samples_mc_entropy: int = 1,
1157
+ entropy_coeff: float | Mapping[NestedKey, float] | None = None,
1158
+ critic_coeff: float | None = None,
1159
+ loss_critic_type: str = "smooth_l1",
1160
+ normalize_advantage: bool = False,
1161
+ normalize_advantage_exclude_dims: tuple[int] = (),
1162
+ gamma: float | None = None,
1163
+ separate_losses: bool = False,
1164
+ reduction: str | None = None,
1165
+ clip_value: bool | float | None = None,
1166
+ device: torch.device | None = None,
1167
+ **kwargs,
1168
+ ):
1169
+ # Define clipping of the value loss
1170
+ if isinstance(clip_value, bool):
1171
+ clip_value = clip_epsilon if clip_value else None
1172
+
1173
+ super().__init__(
1174
+ actor_network,
1175
+ critic_network,
1176
+ entropy_bonus=entropy_bonus,
1177
+ samples_mc_entropy=samples_mc_entropy,
1178
+ entropy_coeff=entropy_coeff,
1179
+ critic_coeff=critic_coeff,
1180
+ loss_critic_type=loss_critic_type,
1181
+ normalize_advantage=normalize_advantage,
1182
+ normalize_advantage_exclude_dims=normalize_advantage_exclude_dims,
1183
+ gamma=gamma,
1184
+ separate_losses=separate_losses,
1185
+ reduction=reduction,
1186
+ clip_value=clip_value,
1187
+ device=device,
1188
+ **kwargs,
1189
+ )
1190
+ if device is None:
1191
+ try:
1192
+ device = next(self.parameters()).device
1193
+ except (AttributeError, StopIteration):
1194
+ device = getattr(
1195
+ torch, "get_default_device", lambda: torch.device("cpu")
1196
+ )()
1197
+ self.register_buffer("clip_epsilon", torch.tensor(clip_epsilon, device=device))
1198
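For illustration, how the ``clip_value`` argument is resolved by the branch at the top of ``__init__`` (plain Python, the values are placeholders):

    clip_epsilon = 0.2
    for clip_value in (True, False, 0.5):
        resolved = (clip_epsilon if clip_value else None) if isinstance(clip_value, bool) else clip_value
        print(clip_value, "->", resolved)    # True -> 0.2, False -> None, 0.5 -> 0.5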
+
1199
+ @property
1200
+ def _clip_bounds(self):
1201
+ return (
1202
+ (-self.clip_epsilon).log1p(),
1203
+ self.clip_epsilon.log1p(),
1204
+ )
1205
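For illustration, why the bounds are expressed with ``log1p``: clamping the log-ratio to [log(1-eps), log(1+eps)] and exponentiating is equivalent to clamping the ratio itself to [1-eps, 1+eps], while staying in log space until needed:

    import torch

    eps = torch.tensor(0.2)
    log_weight = torch.tensor([-0.5, 0.0, 0.4])                   # log(pi_new / pi_old)
    clipped_ratio = log_weight.clamp((-eps).log1p(), eps.log1p()).exp()
    assert torch.allclose(clipped_ratio, log_weight.exp().clamp(1 - eps, 1 + eps))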
+
1206
+ @property
1207
+ def out_keys(self):
1208
+ if self._out_keys is None:
1209
+ keys = ["loss_objective", "clip_fraction"]
1210
+ if self.entropy_bonus:
1211
+ keys.extend(["entropy", "loss_entropy"])
1212
+ if self._has_critic:
1213
+ keys.append("loss_critic")
1214
+ if self.clip_value:
1215
+ keys.append("value_clip_fraction")
1216
+ keys.append("ESS")
1217
+ self._out_keys = keys
1218
+ return self._out_keys
1219
+
1220
+ @out_keys.setter
1221
+ def out_keys(self, values):
1222
+ self._out_keys = values
1223
+
1224
+ @dispatch
1225
+ def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
1226
+ tensordict = tensordict.clone(False)
1227
+ advantage = tensordict.get(
1228
+ self.tensor_keys.advantage, None, as_padded_tensor=True
1229
+ )
1230
+ if advantage is None:
1231
+ if self.critic_network is None:
1232
+ raise RuntimeError(
1233
+ "Critic network is not specified, cannot compute advantage within forward."
1234
+ )
1235
+ self.value_estimator(
1236
+ tensordict,
1237
+ params=self._cached_critic_network_params_detached,
1238
+ target_params=self.target_critic_network_params,
1239
+ )
1240
+ advantage = tensordict.get(self.tensor_keys.advantage)
1241
+ if self.normalize_advantage and advantage.numel() > 1:
1242
+ if advantage.numel() > tensordict.batch_size.numel() and not len(
1243
+ self.normalize_advantage_exclude_dims
1244
+ ):
1245
+ warnings.warn(
1246
+ "You requested advantage normalization and the advantage key has more dimensions"
1247
+ " than the tensordict batch. Make sure to pass `normalize_advantage_exclude_dims` "
1248
+ "if you want to keep any dimension independent while computing normalization statistics. "
1249
+ "If you are working in multi-agent/multi-objective settings this is highly suggested."
1250
+ )
1251
+ advantage = _standardize(advantage, self.normalize_advantage_exclude_dims)
1252
+
1253
+ log_weight, dist, kl_approx = self._log_weight(
1254
+ tensordict, adv_shape=advantage.shape[:-1]
1255
+ )
1256
+ # ESS for logging
1257
+ with torch.no_grad():
1258
+ # In theory, ESS should be computed on particles sampled from the same source. Here we sample according
1259
+ # to different, unrelated trajectories, which is not standard. Still, it can give an idea of the weights'
1260
+ # dispersion.
1261
+ lw = log_weight.squeeze()
1262
+ ess = (2 * lw.logsumexp(0) - (2 * lw).logsumexp(0)).exp()
1263
+ batch = log_weight.shape[0]
1264
+
1265
+ gain1 = log_weight.exp() * advantage
1266
+
1267
+ log_weight_clip = log_weight.clamp(*self._clip_bounds)
1268
+ clip_fraction = (log_weight_clip != log_weight).to(log_weight.dtype).mean()
1269
+ ratio = log_weight_clip.exp()
1270
+ gain2 = ratio * advantage
1271
+
1272
+ gain = torch.stack([gain1, gain2], -1).min(dim=-1).values
1273
+ td_out = TensorDict({"loss_objective": -gain})
1274
+ td_out.set("clip_fraction", clip_fraction)
1275
+ td_out.set("kl_approx", kl_approx.detach().mean()) # for logging
1276
+
1277
+ if self.entropy_bonus:
1278
+ entropy = self._get_entropy(dist, adv_shape=advantage.shape[:-1])
1279
+ if is_tensor_collection(entropy):
1280
+ # Reports the entropy of each action head.
1281
+ td_out.set("composite_entropy", entropy.detach())
1282
+ td_out.set(
1283
+ "entropy", _sum_td_features(entropy).detach().mean()
1284
+ ) # for logging
1285
+ else:
1286
+ td_out.set("entropy", entropy.detach().mean()) # for logging
1287
+ td_out.set("loss_entropy", self._weighted_loss_entropy(entropy))
1288
+ if self._has_critic:
1289
+ loss_critic, value_clip_fraction, explained_variance = self.loss_critic(
1290
+ tensordict
1291
+ )
1292
+ td_out.set("loss_critic", loss_critic)
1293
+ if value_clip_fraction is not None:
1294
+ td_out.set("value_clip_fraction", value_clip_fraction)
1295
+ if explained_variance is not None:
1296
+ td_out.set("explained_variance", explained_variance)
1297
+
1298
+ td_out.set("ESS", _reduce(ess, self.reduction) / batch)
1299
+ td_out = td_out.named_apply(
1300
+ lambda name, value: _reduce(value, reduction=self.reduction).squeeze(-1)
1301
+ if name.startswith("loss_")
1302
+ else value,
1303
+ )
1304
+ self._clear_weakrefs(
1305
+ tensordict,
1306
+ td_out,
1307
+ "actor_network_params",
1308
+ "critic_network_params",
1309
+ "target_actor_network_params",
1310
+ "target_critic_network_params",
1311
+ )
1312
+ return td_out
1313
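For illustration, the log-space ESS computation above is the classical (sum w)^2 / sum(w^2), evaluated without leaving log space (plain torch, random placeholder weights):

    import torch

    lw = torch.randn(256)                                          # log importance weights
    ess_log = (2 * lw.logsumexp(0) - (2 * lw).logsumexp(0)).exp()
    w = lw.exp()
    assert torch.allclose(ess_log, w.sum() ** 2 / (w ** 2).sum())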
+
1314
+
1315
+ class KLPENPPOLoss(PPOLoss):
1316
+ """KL Penalty PPO loss.
1317
+
1318
+ The KL penalty loss has the following formula:
1319
+ loss = loss + beta * KL(old_policy, new_policy)
1320
+ The "beta" parameter is adapted on the fly to match a target KL divergence between the new and old policy, thus
1321
+ favouring a certain level of distancing between the two while still preventing them from drifting too far apart.
1322
+
1323
+ Args:
1324
+ actor_network (ProbabilisticTensorDictSequential): policy operator.
1325
+ critic_network (ValueOperator): value operator.
1326
+
1327
+ Keyword Args:
1328
+ dtarg (scalar, optional): target KL divergence. Defaults to ``0.01``.
1329
+ samples_mc_kl (int, optional): number of samples used to compute the KL divergence
1330
+ if no analytical formula can be found. Defaults to ``1``.
1331
+ beta (scalar, optional): initial KL divergence multiplier.
1332
+ Defaults to ``1.0``.
1333
+ decrement (scalar, optional): multiplicative factor applied to beta if KL < dtarg. Valid range: decrement <= 1.0
1334
+ default: ``0.5``.
1335
+ increment (scalar, optional): multiplicative factor applied to beta if KL > dtarg. Valid range: increment >= 1.0
1336
+ default: ``2.0``.
1337
+ entropy_bonus (bool, optional): if ``True``, an entropy bonus will be added to the
1338
+ loss to favour exploratory policies. Defaults to ``True``.
1339
+ samples_mc_entropy (int, optional): if the distribution retrieved from the policy
1340
+ operator does not have a closed form
1341
+ formula for the entropy, a Monte-Carlo estimate will be used.
1342
+ ``samples_mc_entropy`` will control how many
1343
+ samples will be used to compute this estimate.
1344
+ Defaults to ``1``.
1345
+ entropy_coeff (scalar | Mapping[NestedKey, scalar], optional): entropy multiplier when computing the total loss.
1346
+ * **Scalar**: one value applied to the summed entropy of every action head.
1347
+ * **Mapping** ``{head_name: coeff}`` gives an individual coefficient for each action-head's entropy.
1348
+ Defaults to ``0.01``.
1349
+
1350
+ See :ref:`ppo_entropy_coefficients` for detailed usage examples and troubleshooting.
1351
+ critic_coeff (scalar, optional): critic loss multiplier when computing the total
1352
+ loss. Defaults to ``1.0``.
1353
+ loss_critic_type (str, optional): loss function for the value discrepancy.
1354
+ Can be one of "l1", "l2" or "smooth_l1". Defaults to ``"smooth_l1"``.
1355
+ normalize_advantage (bool, optional): if ``True``, the advantage will be normalized
1356
+ before being used. Defaults to ``False``.
1357
+ normalize_advantage_exclude_dims (Tuple[int], optional): dimensions to exclude from the advantage
1358
+ standardization. Negative dimensions are valid. This is useful in multiagent (or multiobjective) settings
1359
+ where the agent (or objective) dimension may be excluded from the reductions. Default: ().
1360
+ separate_losses (bool, optional): if ``True``, shared parameters between
1361
+ policy and critic will only be trained on the policy loss.
1362
+ Defaults to ``False``, i.e., gradients are propagated to shared
1363
+ parameters for both policy and critic losses.
1364
+ advantage_key (str, optional): [Deprecated, use set_keys(advantage_key=advantage_key) instead]
1365
+ The input tensordict key where the advantage is
1366
+ expected to be written. Defaults to ``"advantage"``.
1367
+ value_target_key (str, optional): [Deprecated, use set_keys(value_target_key=value_target_key) instead]
1368
+ The input tensordict key where the target state
1369
+ value is expected to be written. Defaults to ``"value_target"``.
1370
+ value_key (str, optional): [Deprecated, use set_keys(value_key) instead]
1371
+ The input tensordict key where the state
1372
+ value is expected to be written. Defaults to ``"state_value"``.
1373
+ functional (bool, optional): whether modules should be functionalized.
1374
+ Functionalizing permits features like meta-RL, but makes it
1375
+ impossible to use distributed models (DDP, FSDP, ...) and comes
1376
+ with a small runtime cost. Defaults to ``True``.
1377
+ reduction (str, optional): Specifies the reduction to apply to the output:
1378
+ ``"none"`` | ``"mean"`` | ``"sum"``. ``"none"``: no reduction will be applied,
1379
+ ``"mean"``: the sum of the output will be divided by the number of
1380
+ elements in the output, ``"sum"``: the output will be summed. Default: ``"mean"``.
1381
+ clip_value (:obj:`float`, optional): If provided, it will be used to compute a clipped version of the value
1382
+ prediction with respect to the input tensordict value estimate and use it to calculate the value loss.
1383
+ The purpose of clipping is to limit the impact of extreme value predictions, helping stabilize training
1384
+ and preventing large updates. However, it will have no impact if the value estimate was done by the current
1385
+ version of the value estimator. Defaults to ``None``.
1386
+ device (torch.device, optional): device of the buffers. Defaults to ``None``.
1387
+
1388
+ .. note:: Parameters and buffers from the policy / critic will not be cast to that device to ensure that
1389
+ the storages match the ones that are passed to other components, such as data collectors.
1390
+
1391
+ .. note::
1392
+ The advantage (typically GAE) can be computed by the loss function or
1393
+ in the training loop. The latter option is usually preferred, but the
1394
+ choice is ultimately left to the user.
1395
+ If the advantage key (``"advantage"`` by default) is not present in the
1396
+ input tensordict, the advantage will be computed by the :meth:`~.forward`
1397
+ method.
1398
+
1399
+ >>> ppo_loss = KLPENPPOLoss(actor, critic)
1400
+ >>> advantage = GAE(critic)
1401
+ >>> data = next(datacollector)
1402
+ >>> losses = ppo_loss(data)
1403
+ >>> # equivalent
1404
+ >>> advantage(data)
1405
+ >>> losses = ppo_loss(data)
1406
+
1407
+ A custom advantage module can be built using :meth:`~.make_value_estimator`.
1408
+ The default is :class:`~torchrl.objectives.value.GAE` with hyperparameters
1409
+ dictated by :func:`~torchrl.objectives.utils.default_value_kwargs`.
1410
+
1411
+ >>> ppo_loss = KLPENPPOLoss(actor, critic)
1412
+ >>> ppo_loss.make_value_estimator(ValueEstimators.TDLambda)
1413
+ >>> data = next(datacollector)
1414
+ >>> losses = ppo_loss(data)
1415
+
1416
+ .. note::
1417
+ If the actor and the value function share parameters, one can avoid
1418
+ calling the common module multiple times by passing only the head of the
1419
+ value network to the PPO loss module:
1420
+
1421
+ >>> common = SomeModule(in_keys=["observation"], out_keys=["hidden"])
1422
+ >>> actor_head = SomeActor(in_keys=["hidden"])
1423
+ >>> value_head = SomeValue(in_keys=["hidden"])
1424
+ >>> # first option, with 2 calls on the common module
1425
+ >>> model = ActorValueOperator(common, actor_head, value_head)
1426
+ >>> loss_module = KLPENPPOLoss(model.get_policy_operator(), model.get_value_operator())
1427
+ >>> # second option, with a single call to the common module
1428
+ >>> loss_module = KLPENPPOLoss(ProbabilisticTensorDictSequential(model, actor_head), value_head)
1429
+
1430
+ This will work regardless of whether separate_losses is activated or not.
1431
+
1432
+ """
1433
+
1434
+ actor_network: TensorDictModule
1435
+ critic_network: TensorDictModule
1436
+ actor_network_params: TensorDictParams
1437
+ critic_network_params: TensorDictParams
1438
+ target_actor_network_params: TensorDictParams
1439
+ target_critic_network_params: TensorDictParams
1440
+
1441
+ def __init__(
1442
+ self,
1443
+ actor_network: ProbabilisticTensorDictSequential | None = None,
1444
+ critic_network: TensorDictModule | None = None,
1445
+ *,
1446
+ dtarg: float = 0.01,
1447
+ beta: float = 1.0,
1448
+ increment: float = 2,
1449
+ decrement: float = 0.5,
1450
+ samples_mc_kl: int = 1,
1451
+ entropy_bonus: bool = True,
1452
+ samples_mc_entropy: int = 1,
1453
+ entropy_coeff: float | Mapping[NestedKey, float] | None = None,
1454
+ critic_coeff: float | None = None,
1455
+ loss_critic_type: str = "smooth_l1",
1456
+ normalize_advantage: bool = False,
1457
+ normalize_advantage_exclude_dims: tuple[int] = (),
1458
+ gamma: float | None = None,
1459
+ separate_losses: bool = False,
1460
+ reduction: str | None = None,
1461
+ clip_value: float | None = None,
1462
+ device: torch.device | None = None,
1463
+ **kwargs,
1464
+ ):
1465
+ super().__init__(
1466
+ actor_network,
1467
+ critic_network,
1468
+ entropy_bonus=entropy_bonus,
1469
+ samples_mc_entropy=samples_mc_entropy,
1470
+ entropy_coeff=entropy_coeff,
1471
+ critic_coeff=critic_coeff,
1472
+ loss_critic_type=loss_critic_type,
1473
+ normalize_advantage=normalize_advantage,
1474
+ normalize_advantage_exclude_dims=normalize_advantage_exclude_dims,
1475
+ gamma=gamma,
1476
+ separate_losses=separate_losses,
1477
+ reduction=reduction,
1478
+ clip_value=clip_value,
1479
+ device=device,
1480
+ **kwargs,
1481
+ )
1482
+
1483
+ if device is None:
1484
+ try:
1485
+ device = next(self.parameters()).device
1486
+ except (AttributeError, StopIteration):
1487
+ device = getattr(
1488
+ torch, "get_default_device", lambda: torch.device("cpu")
1489
+ )()
1490
+
1491
+ self.dtarg = dtarg
1492
+ self._beta_init = beta
1493
+ self.register_buffer("beta", torch.tensor(beta, device=device))
1494
+
1495
+ if increment < 1.0:
1496
+ raise ValueError(
1497
+ f"increment should be >= 1.0 in KLPENPPOLoss, got {increment:4.4f}"
1498
+ )
1499
+ self.increment = increment
1500
+ if decrement > 1.0:
1501
+ raise ValueError(
1502
+ f"decrement should be <= 1.0 in KLPENPPOLoss, got {decrement:4.4f}"
1503
+ )
1504
+ self.decrement = decrement
1505
+ self.samples_mc_kl = samples_mc_kl
1506
+
1507
+ def _set_in_keys(self):
1508
+ keys = []
1509
+ _maybe_add_or_extend_key(keys, self.actor_network.in_keys)
1510
+ _maybe_add_or_extend_key(keys, self.actor_network.in_keys, "next")
1511
+ if self.critic_network is not None:
1512
+ _maybe_add_or_extend_key(keys, self.critic_network.in_keys)
1513
+ _maybe_add_or_extend_key(keys, self.tensor_keys.action)
1514
+ _maybe_add_or_extend_key(keys, self.tensor_keys.sample_log_prob)
1515
+ _maybe_add_or_extend_key(keys, self.tensor_keys.reward, "next")
1516
+ _maybe_add_or_extend_key(keys, self.tensor_keys.done, "next")
1517
+ _maybe_add_or_extend_key(keys, self.tensor_keys.terminated, "next")
1518
+
1519
+ # Get the parameter keys from the actor dist
1520
+ actor_dist_module = None
1521
+ for module in self.actor_network.modules():
1522
+ # Ideally we should combine them if there is more than one
1523
+ if isinstance(module, ProbabilisticTensorDictModule):
1524
+ if actor_dist_module is not None:
1525
+ raise RuntimeError(
1526
+ "Actors with one and only one distribution are currently supported "
1527
+ f"in {type(self).__name__}. If you need to use more than one "
1528
+ f"distributions over the action space please submit an issue "
1529
+ f"on github."
1530
+ )
1531
+ actor_dist_module = module
1532
+ if actor_dist_module is None:
1533
+ raise RuntimeError("Could not find the probabilistic module in the actor.")
1534
+ keys += list(actor_dist_module.in_keys)
1535
+ self._in_keys = list(set(keys))
1536
+
1537
+ @property
1538
+ def out_keys(self):
1539
+ if self._out_keys is None:
1540
+ keys = ["loss_objective", "kl"]
1541
+ if self.entropy_bonus:
1542
+ keys.extend(["entropy", "loss_entropy"])
1543
+ if self._has_critic:
1544
+ keys.append("loss_critic")
1545
+ if self.clip_value:
1546
+ keys.append("value_clip_fraction")
1547
+ self._out_keys = keys
1548
+ return self._out_keys
1549
+
1550
+ @out_keys.setter
1551
+ def out_keys(self, values):
1552
+ self._out_keys = values
1553
+
1554
+ @dispatch
1555
+ def forward(self, tensordict: TensorDictBase) -> TensorDict:
1556
+ tensordict_copy = tensordict.copy()
1557
+ try:
1558
+ previous_dist = self.actor_network.build_dist_from_params(tensordict)
1559
+ except KeyError as err:
1560
+ raise KeyError(
1561
+ "The parameters of the distribution were not found. "
1562
+ f"Make sure they are provided to {type(self).__name__}."
1563
+ ) from err
1564
+ advantage = tensordict_copy.get(self.tensor_keys.advantage, None)
1565
+ if advantage is None:
1566
+ self.value_estimator(
1567
+ tensordict_copy,
1568
+ params=self._cached_critic_network_params_detached,
1569
+ target_params=self.target_critic_network_params,
1570
+ )
1571
+ advantage = tensordict_copy.get(self.tensor_keys.advantage)
1572
+ if self.normalize_advantage and advantage.numel() > 1:
1573
+ if advantage.numel() > tensordict.batch_size.numel() and not len(
1574
+ self.normalize_advantage_exclude_dims
1575
+ ):
1576
+ warnings.warn(
1577
+ "You requested advantage normalization and the advantage key has more dimensions"
1578
+ " than the tensordict batch. Make sure to pass `normalize_advantage_exclude_dims` "
1579
+ "if you want to keep any dimension independent while computing normalization statistics. "
1580
+ "If you are working in multi-agent/multi-objective settings this is highly suggested."
1581
+ )
1582
+ advantage = _standardize(advantage, self.normalize_advantage_exclude_dims)
1583
+
1584
+ log_weight, dist, kl_approx = self._log_weight(
1585
+ tensordict_copy, adv_shape=advantage.shape[:-1]
1586
+ )
1587
+ neg_loss = log_weight.exp() * advantage
1588
+
1589
+ with (
1590
+ self.actor_network_params.to_module(self.actor_network)
1591
+ if self.functional
1592
+ else contextlib.nullcontext()
1593
+ ):
1594
+ current_dist = self.actor_network.get_dist(tensordict_copy)
1595
+ is_composite = isinstance(current_dist, CompositeDistribution)
1596
+ try:
1597
+ kl = torch.distributions.kl.kl_divergence(previous_dist, current_dist)
1598
+ except NotImplementedError:
1599
+ x = previous_dist.sample((self.samples_mc_kl,))
1600
+ with (
1601
+ set_composite_lp_aggregate(False)
1602
+ if is_composite
1603
+ else contextlib.nullcontext()
1604
+ ):
1605
+ previous_log_prob = previous_dist.log_prob(x)
1606
+ current_log_prob = current_dist.log_prob(x)
1607
+ if is_tensor_collection(previous_log_prob):
1608
+ if previous_log_prob.batch_size != advantage.shape[:-1]:
1609
+ previous_log_prob.batch_size = (
1610
+ self.samples_mc_kl,
1611
+ ) + advantage.shape[:-1]
1612
+ current_log_prob.batch_size = (
1613
+ self.samples_mc_kl,
1614
+ ) + advantage.shape[:-1]
1615
+ previous_log_prob = _sum_td_features(previous_log_prob)
1616
+ # Both dists have presumably the same params
1617
+ current_log_prob = _sum_td_features(current_log_prob)
1618
+ kl = (previous_log_prob - current_log_prob).mean(0)
1619
+ kl = kl.unsqueeze(-1)
1620
+ neg_loss = neg_loss - self.beta * kl
1621
+ if kl.mean() > self.dtarg * 1.5:
1622
+ self.beta.data *= self.increment
1623
+ elif kl.mean() < self.dtarg / 1.5:
1624
+ self.beta.data *= self.decrement
1625
+ td_out = TensorDict(
1626
+ {
1627
+ "loss_objective": -neg_loss,
1628
+ "kl": kl.detach(),
1629
+ "kl_approx": kl_approx.detach().mean(),
1630
+ },
1631
+ )
1632
+
1633
+ if self.entropy_bonus:
1634
+ entropy = self._get_entropy(dist, adv_shape=advantage.shape[:-1])
1635
+ if is_tensor_collection(entropy):
1636
+ # Reports the entropy of each action head.
1637
+ td_out.set("composite_entropy", entropy.detach())
1638
+ td_out.set(
1639
+ "entropy", _sum_td_features(entropy).detach().mean()
1640
+ ) # for logging
1641
+ else:
1642
+ td_out.set("entropy", entropy.detach().mean()) # for logging
1643
+ td_out.set("loss_entropy", self._weighted_loss_entropy(entropy))
1644
+ if self._has_critic:
1645
+ loss_critic, value_clip_fraction, explained_variance = self.loss_critic(
1646
+ tensordict_copy
1647
+ )
1648
+ td_out.set("loss_critic", loss_critic)
1649
+ if value_clip_fraction is not None:
1650
+ td_out.set("value_clip_fraction", value_clip_fraction)
1651
+ if explained_variance is not None:
1652
+ td_out.set("explained_variance", explained_variance)
1653
+ td_out = td_out.named_apply(
1654
+ lambda name, value: _reduce(value, reduction=self.reduction).squeeze(-1)
1655
+ if name.startswith("loss_")
1656
+ else value,
1657
+ )
1658
+ self._clear_weakrefs(
1659
+ tensordict,
1660
+ td_out,
1661
+ "actor_network_params",
1662
+ "critic_network_params",
1663
+ "target_actor_network_params",
1664
+ "target_critic_network_params",
1665
+ )
1666
+ return td_out
1667
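For illustration, the Monte-Carlo fallback used above when no closed-form KL is registered: draw ``samples_mc_kl`` samples from the old distribution and average the log-probability gap (the Normal distributions below are placeholders):

    import torch
    from torch import distributions as d

    p_old = d.Normal(torch.tensor(0.0), torch.tensor(1.0))
    p_new = d.Normal(torch.tensor(0.1), torch.tensor(1.2))

    x = p_old.sample((10_000,))                                    # samples_mc_kl draws from pi_old
    kl_mc = (p_old.log_prob(x) - p_new.log_prob(x)).mean(0)        # Monte-Carlo estimate of KL(old || new)
    kl_exact = d.kl_divergence(p_old, p_new)                       # closed form, for comparison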
+
1668
+ def reset(self) -> None:
1669
+ self.beta.data.fill_(self._beta_init)  # in-place reset: beta is a registered buffer and cannot be rebound to a float
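For illustration, the adaptive-beta rule applied in ``forward`` above, in isolation (plain Python, the KL readings are placeholders): beta grows when the measured KL overshoots the target by more than 50% and shrinks when it undershoots by the same margin, otherwise it is left unchanged:

    dtarg, beta, increment, decrement = 0.01, 1.0, 2.0, 0.5

    for kl in (0.05, 0.02, 0.002):           # illustrative KL measurements
        if kl > dtarg * 1.5:
            beta *= increment
        elif kl < dtarg / 1.5:
            beta *= decrement
        print(kl, beta)                       # -> 2.0, then 4.0, then 2.0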