torchrl 0.11.0__cp314-cp314t-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (394)
  1. benchmarks/benchmark_batched_envs.py +104 -0
  2. benchmarks/conftest.py +91 -0
  3. benchmarks/ecosystem/gym_env_throughput.py +321 -0
  4. benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
  5. benchmarks/requirements.txt +7 -0
  6. benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
  7. benchmarks/test_collectors_benchmark.py +240 -0
  8. benchmarks/test_compressed_storage_benchmark.py +145 -0
  9. benchmarks/test_envs_benchmark.py +133 -0
  10. benchmarks/test_llm.py +101 -0
  11. benchmarks/test_non_tensor_env_benchmark.py +70 -0
  12. benchmarks/test_objectives_benchmarks.py +1199 -0
  13. benchmarks/test_replaybuffer_benchmark.py +254 -0
  14. sota-check/README.md +35 -0
  15. sota-implementations/README.md +142 -0
  16. sota-implementations/a2c/README.md +39 -0
  17. sota-implementations/a2c/a2c_atari.py +291 -0
  18. sota-implementations/a2c/a2c_mujoco.py +273 -0
  19. sota-implementations/a2c/utils_atari.py +240 -0
  20. sota-implementations/a2c/utils_mujoco.py +160 -0
  21. sota-implementations/bandits/README.md +7 -0
  22. sota-implementations/bandits/dqn.py +126 -0
  23. sota-implementations/cql/cql_offline.py +198 -0
  24. sota-implementations/cql/cql_online.py +249 -0
  25. sota-implementations/cql/discrete_cql_offline.py +180 -0
  26. sota-implementations/cql/discrete_cql_online.py +227 -0
  27. sota-implementations/cql/utils.py +471 -0
  28. sota-implementations/crossq/crossq.py +271 -0
  29. sota-implementations/crossq/utils.py +320 -0
  30. sota-implementations/ddpg/ddpg.py +231 -0
  31. sota-implementations/ddpg/utils.py +325 -0
  32. sota-implementations/decision_transformer/dt.py +163 -0
  33. sota-implementations/decision_transformer/lamb.py +167 -0
  34. sota-implementations/decision_transformer/online_dt.py +178 -0
  35. sota-implementations/decision_transformer/utils.py +562 -0
  36. sota-implementations/discrete_sac/discrete_sac.py +243 -0
  37. sota-implementations/discrete_sac/utils.py +324 -0
  38. sota-implementations/dqn/README.md +30 -0
  39. sota-implementations/dqn/dqn_atari.py +272 -0
  40. sota-implementations/dqn/dqn_cartpole.py +236 -0
  41. sota-implementations/dqn/utils_atari.py +132 -0
  42. sota-implementations/dqn/utils_cartpole.py +90 -0
  43. sota-implementations/dreamer/README.md +129 -0
  44. sota-implementations/dreamer/dreamer.py +586 -0
  45. sota-implementations/dreamer/dreamer_utils.py +1107 -0
  46. sota-implementations/expert-iteration/README.md +352 -0
  47. sota-implementations/expert-iteration/ei_utils.py +770 -0
  48. sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
  49. sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
  50. sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
  51. sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
  52. sota-implementations/gail/gail.py +327 -0
  53. sota-implementations/gail/gail_utils.py +68 -0
  54. sota-implementations/gail/ppo_utils.py +157 -0
  55. sota-implementations/grpo/README.md +273 -0
  56. sota-implementations/grpo/grpo-async.py +437 -0
  57. sota-implementations/grpo/grpo-sync.py +435 -0
  58. sota-implementations/grpo/grpo_utils.py +843 -0
  59. sota-implementations/grpo/requirements_gsm8k.txt +11 -0
  60. sota-implementations/grpo/requirements_ifeval.txt +16 -0
  61. sota-implementations/impala/README.md +33 -0
  62. sota-implementations/impala/impala_multi_node_ray.py +292 -0
  63. sota-implementations/impala/impala_multi_node_submitit.py +284 -0
  64. sota-implementations/impala/impala_single_node.py +261 -0
  65. sota-implementations/impala/utils.py +184 -0
  66. sota-implementations/iql/discrete_iql.py +230 -0
  67. sota-implementations/iql/iql_offline.py +164 -0
  68. sota-implementations/iql/iql_online.py +225 -0
  69. sota-implementations/iql/utils.py +437 -0
  70. sota-implementations/multiagent/README.md +74 -0
  71. sota-implementations/multiagent/iql.py +237 -0
  72. sota-implementations/multiagent/maddpg_iddpg.py +266 -0
  73. sota-implementations/multiagent/mappo_ippo.py +267 -0
  74. sota-implementations/multiagent/qmix_vdn.py +271 -0
  75. sota-implementations/multiagent/sac.py +337 -0
  76. sota-implementations/multiagent/utils/__init__.py +4 -0
  77. sota-implementations/multiagent/utils/logging.py +151 -0
  78. sota-implementations/multiagent/utils/utils.py +43 -0
  79. sota-implementations/ppo/README.md +29 -0
  80. sota-implementations/ppo/ppo_atari.py +305 -0
  81. sota-implementations/ppo/ppo_mujoco.py +293 -0
  82. sota-implementations/ppo/utils_atari.py +238 -0
  83. sota-implementations/ppo/utils_mujoco.py +152 -0
  84. sota-implementations/ppo_trainer/train.py +21 -0
  85. sota-implementations/redq/README.md +7 -0
  86. sota-implementations/redq/redq.py +199 -0
  87. sota-implementations/redq/utils.py +1060 -0
  88. sota-implementations/sac/sac-async.py +266 -0
  89. sota-implementations/sac/sac.py +239 -0
  90. sota-implementations/sac/utils.py +381 -0
  91. sota-implementations/sac_trainer/train.py +16 -0
  92. sota-implementations/td3/td3.py +254 -0
  93. sota-implementations/td3/utils.py +319 -0
  94. sota-implementations/td3_bc/td3_bc.py +177 -0
  95. sota-implementations/td3_bc/utils.py +251 -0
  96. torchrl/__init__.py +144 -0
  97. torchrl/_extension.py +74 -0
  98. torchrl/_torchrl.cp314t-win_amd64.pyd +0 -0
  99. torchrl/_utils.py +1431 -0
  100. torchrl/collectors/__init__.py +48 -0
  101. torchrl/collectors/_base.py +1058 -0
  102. torchrl/collectors/_constants.py +88 -0
  103. torchrl/collectors/_multi_async.py +324 -0
  104. torchrl/collectors/_multi_base.py +1805 -0
  105. torchrl/collectors/_multi_sync.py +464 -0
  106. torchrl/collectors/_runner.py +581 -0
  107. torchrl/collectors/_single.py +2009 -0
  108. torchrl/collectors/_single_async.py +259 -0
  109. torchrl/collectors/collectors.py +62 -0
  110. torchrl/collectors/distributed/__init__.py +32 -0
  111. torchrl/collectors/distributed/default_configs.py +133 -0
  112. torchrl/collectors/distributed/generic.py +1306 -0
  113. torchrl/collectors/distributed/ray.py +1092 -0
  114. torchrl/collectors/distributed/rpc.py +1006 -0
  115. torchrl/collectors/distributed/sync.py +731 -0
  116. torchrl/collectors/distributed/utils.py +160 -0
  117. torchrl/collectors/llm/__init__.py +10 -0
  118. torchrl/collectors/llm/base.py +494 -0
  119. torchrl/collectors/llm/ray_collector.py +275 -0
  120. torchrl/collectors/llm/utils.py +36 -0
  121. torchrl/collectors/llm/weight_update/__init__.py +10 -0
  122. torchrl/collectors/llm/weight_update/vllm.py +348 -0
  123. torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
  124. torchrl/collectors/utils.py +433 -0
  125. torchrl/collectors/weight_update.py +591 -0
  126. torchrl/csrc/numpy_utils.h +38 -0
  127. torchrl/csrc/pybind.cpp +27 -0
  128. torchrl/csrc/segment_tree.h +458 -0
  129. torchrl/csrc/torch_utils.h +34 -0
  130. torchrl/csrc/utils.cpp +48 -0
  131. torchrl/csrc/utils.h +31 -0
  132. torchrl/data/__init__.py +187 -0
  133. torchrl/data/datasets/__init__.py +58 -0
  134. torchrl/data/datasets/atari_dqn.py +878 -0
  135. torchrl/data/datasets/common.py +281 -0
  136. torchrl/data/datasets/d4rl.py +489 -0
  137. torchrl/data/datasets/d4rl_infos.py +187 -0
  138. torchrl/data/datasets/gen_dgrl.py +375 -0
  139. torchrl/data/datasets/minari_data.py +643 -0
  140. torchrl/data/datasets/openml.py +177 -0
  141. torchrl/data/datasets/openx.py +798 -0
  142. torchrl/data/datasets/roboset.py +363 -0
  143. torchrl/data/datasets/utils.py +11 -0
  144. torchrl/data/datasets/vd4rl.py +432 -0
  145. torchrl/data/llm/__init__.py +34 -0
  146. torchrl/data/llm/dataset.py +491 -0
  147. torchrl/data/llm/history.py +1378 -0
  148. torchrl/data/llm/prompt.py +198 -0
  149. torchrl/data/llm/reward.py +225 -0
  150. torchrl/data/llm/topk.py +186 -0
  151. torchrl/data/llm/utils.py +543 -0
  152. torchrl/data/map/__init__.py +21 -0
  153. torchrl/data/map/hash.py +185 -0
  154. torchrl/data/map/query.py +204 -0
  155. torchrl/data/map/tdstorage.py +363 -0
  156. torchrl/data/map/tree.py +1434 -0
  157. torchrl/data/map/utils.py +103 -0
  158. torchrl/data/postprocs/__init__.py +8 -0
  159. torchrl/data/postprocs/postprocs.py +391 -0
  160. torchrl/data/replay_buffers/__init__.py +99 -0
  161. torchrl/data/replay_buffers/checkpointers.py +622 -0
  162. torchrl/data/replay_buffers/ray_buffer.py +292 -0
  163. torchrl/data/replay_buffers/replay_buffers.py +2376 -0
  164. torchrl/data/replay_buffers/samplers.py +2578 -0
  165. torchrl/data/replay_buffers/scheduler.py +265 -0
  166. torchrl/data/replay_buffers/storages.py +2412 -0
  167. torchrl/data/replay_buffers/utils.py +1042 -0
  168. torchrl/data/replay_buffers/writers.py +781 -0
  169. torchrl/data/tensor_specs.py +7101 -0
  170. torchrl/data/utils.py +334 -0
  171. torchrl/envs/__init__.py +265 -0
  172. torchrl/envs/async_envs.py +1105 -0
  173. torchrl/envs/batched_envs.py +3093 -0
  174. torchrl/envs/common.py +4241 -0
  175. torchrl/envs/custom/__init__.py +11 -0
  176. torchrl/envs/custom/chess.py +617 -0
  177. torchrl/envs/custom/llm.py +214 -0
  178. torchrl/envs/custom/pendulum.py +401 -0
  179. torchrl/envs/custom/san_moves.txt +29274 -0
  180. torchrl/envs/custom/tictactoeenv.py +288 -0
  181. torchrl/envs/env_creator.py +263 -0
  182. torchrl/envs/gym_like.py +752 -0
  183. torchrl/envs/libs/__init__.py +68 -0
  184. torchrl/envs/libs/_gym_utils.py +326 -0
  185. torchrl/envs/libs/brax.py +846 -0
  186. torchrl/envs/libs/dm_control.py +544 -0
  187. torchrl/envs/libs/envpool.py +447 -0
  188. torchrl/envs/libs/gym.py +2239 -0
  189. torchrl/envs/libs/habitat.py +138 -0
  190. torchrl/envs/libs/isaac_lab.py +87 -0
  191. torchrl/envs/libs/isaacgym.py +203 -0
  192. torchrl/envs/libs/jax_utils.py +166 -0
  193. torchrl/envs/libs/jumanji.py +963 -0
  194. torchrl/envs/libs/meltingpot.py +599 -0
  195. torchrl/envs/libs/openml.py +153 -0
  196. torchrl/envs/libs/openspiel.py +652 -0
  197. torchrl/envs/libs/pettingzoo.py +1042 -0
  198. torchrl/envs/libs/procgen.py +351 -0
  199. torchrl/envs/libs/robohive.py +429 -0
  200. torchrl/envs/libs/smacv2.py +645 -0
  201. torchrl/envs/libs/unity_mlagents.py +891 -0
  202. torchrl/envs/libs/utils.py +147 -0
  203. torchrl/envs/libs/vmas.py +813 -0
  204. torchrl/envs/llm/__init__.py +63 -0
  205. torchrl/envs/llm/chat.py +730 -0
  206. torchrl/envs/llm/datasets/README.md +4 -0
  207. torchrl/envs/llm/datasets/__init__.py +17 -0
  208. torchrl/envs/llm/datasets/gsm8k.py +353 -0
  209. torchrl/envs/llm/datasets/ifeval.py +274 -0
  210. torchrl/envs/llm/envs.py +789 -0
  211. torchrl/envs/llm/libs/README.md +3 -0
  212. torchrl/envs/llm/libs/__init__.py +8 -0
  213. torchrl/envs/llm/libs/mlgym.py +869 -0
  214. torchrl/envs/llm/reward/__init__.py +10 -0
  215. torchrl/envs/llm/reward/gsm8k.py +324 -0
  216. torchrl/envs/llm/reward/ifeval/README.md +13 -0
  217. torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
  218. torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
  219. torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
  220. torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
  221. torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
  222. torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
  223. torchrl/envs/llm/transforms/__init__.py +55 -0
  224. torchrl/envs/llm/transforms/browser.py +292 -0
  225. torchrl/envs/llm/transforms/dataloading.py +859 -0
  226. torchrl/envs/llm/transforms/format.py +73 -0
  227. torchrl/envs/llm/transforms/kl.py +1544 -0
  228. torchrl/envs/llm/transforms/policy_version.py +189 -0
  229. torchrl/envs/llm/transforms/reason.py +323 -0
  230. torchrl/envs/llm/transforms/tokenizer.py +321 -0
  231. torchrl/envs/llm/transforms/tools.py +1955 -0
  232. torchrl/envs/model_based/__init__.py +9 -0
  233. torchrl/envs/model_based/common.py +180 -0
  234. torchrl/envs/model_based/dreamer.py +112 -0
  235. torchrl/envs/transforms/__init__.py +147 -0
  236. torchrl/envs/transforms/functional.py +48 -0
  237. torchrl/envs/transforms/gym_transforms.py +203 -0
  238. torchrl/envs/transforms/module.py +341 -0
  239. torchrl/envs/transforms/r3m.py +372 -0
  240. torchrl/envs/transforms/ray_service.py +663 -0
  241. torchrl/envs/transforms/rb_transforms.py +214 -0
  242. torchrl/envs/transforms/transforms.py +11835 -0
  243. torchrl/envs/transforms/utils.py +94 -0
  244. torchrl/envs/transforms/vc1.py +307 -0
  245. torchrl/envs/transforms/vecnorm.py +845 -0
  246. torchrl/envs/transforms/vip.py +407 -0
  247. torchrl/envs/utils.py +1718 -0
  248. torchrl/envs/vec_envs.py +11 -0
  249. torchrl/modules/__init__.py +206 -0
  250. torchrl/modules/distributions/__init__.py +73 -0
  251. torchrl/modules/distributions/continuous.py +830 -0
  252. torchrl/modules/distributions/discrete.py +908 -0
  253. torchrl/modules/distributions/truncated_normal.py +187 -0
  254. torchrl/modules/distributions/utils.py +233 -0
  255. torchrl/modules/llm/__init__.py +62 -0
  256. torchrl/modules/llm/backends/__init__.py +65 -0
  257. torchrl/modules/llm/backends/vllm/__init__.py +94 -0
  258. torchrl/modules/llm/backends/vllm/_models.py +46 -0
  259. torchrl/modules/llm/backends/vllm/base.py +72 -0
  260. torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
  261. torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
  262. torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
  263. torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
  264. torchrl/modules/llm/policies/__init__.py +28 -0
  265. torchrl/modules/llm/policies/common.py +1809 -0
  266. torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
  267. torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
  268. torchrl/modules/llm/utils.py +23 -0
  269. torchrl/modules/mcts/__init__.py +21 -0
  270. torchrl/modules/mcts/scores.py +579 -0
  271. torchrl/modules/models/__init__.py +86 -0
  272. torchrl/modules/models/batchrenorm.py +119 -0
  273. torchrl/modules/models/decision_transformer.py +179 -0
  274. torchrl/modules/models/exploration.py +731 -0
  275. torchrl/modules/models/llm.py +156 -0
  276. torchrl/modules/models/model_based.py +596 -0
  277. torchrl/modules/models/models.py +1712 -0
  278. torchrl/modules/models/multiagent.py +1067 -0
  279. torchrl/modules/models/recipes/impala.py +185 -0
  280. torchrl/modules/models/utils.py +162 -0
  281. torchrl/modules/planners/__init__.py +10 -0
  282. torchrl/modules/planners/cem.py +228 -0
  283. torchrl/modules/planners/common.py +73 -0
  284. torchrl/modules/planners/mppi.py +265 -0
  285. torchrl/modules/tensordict_module/__init__.py +89 -0
  286. torchrl/modules/tensordict_module/actors.py +2457 -0
  287. torchrl/modules/tensordict_module/common.py +529 -0
  288. torchrl/modules/tensordict_module/exploration.py +814 -0
  289. torchrl/modules/tensordict_module/probabilistic.py +321 -0
  290. torchrl/modules/tensordict_module/rnn.py +1639 -0
  291. torchrl/modules/tensordict_module/sequence.py +132 -0
  292. torchrl/modules/tensordict_module/world_models.py +34 -0
  293. torchrl/modules/utils/__init__.py +38 -0
  294. torchrl/modules/utils/mappings.py +9 -0
  295. torchrl/modules/utils/utils.py +89 -0
  296. torchrl/objectives/__init__.py +78 -0
  297. torchrl/objectives/a2c.py +659 -0
  298. torchrl/objectives/common.py +753 -0
  299. torchrl/objectives/cql.py +1346 -0
  300. torchrl/objectives/crossq.py +710 -0
  301. torchrl/objectives/ddpg.py +453 -0
  302. torchrl/objectives/decision_transformer.py +371 -0
  303. torchrl/objectives/deprecated.py +516 -0
  304. torchrl/objectives/dqn.py +683 -0
  305. torchrl/objectives/dreamer.py +488 -0
  306. torchrl/objectives/functional.py +48 -0
  307. torchrl/objectives/gail.py +258 -0
  308. torchrl/objectives/iql.py +996 -0
  309. torchrl/objectives/llm/__init__.py +30 -0
  310. torchrl/objectives/llm/grpo.py +846 -0
  311. torchrl/objectives/llm/sft.py +482 -0
  312. torchrl/objectives/multiagent/__init__.py +8 -0
  313. torchrl/objectives/multiagent/qmixer.py +396 -0
  314. torchrl/objectives/ppo.py +1669 -0
  315. torchrl/objectives/redq.py +683 -0
  316. torchrl/objectives/reinforce.py +530 -0
  317. torchrl/objectives/sac.py +1580 -0
  318. torchrl/objectives/td3.py +570 -0
  319. torchrl/objectives/td3_bc.py +625 -0
  320. torchrl/objectives/utils.py +782 -0
  321. torchrl/objectives/value/__init__.py +28 -0
  322. torchrl/objectives/value/advantages.py +1956 -0
  323. torchrl/objectives/value/functional.py +1459 -0
  324. torchrl/objectives/value/utils.py +360 -0
  325. torchrl/record/__init__.py +17 -0
  326. torchrl/record/loggers/__init__.py +23 -0
  327. torchrl/record/loggers/common.py +48 -0
  328. torchrl/record/loggers/csv.py +226 -0
  329. torchrl/record/loggers/mlflow.py +142 -0
  330. torchrl/record/loggers/tensorboard.py +139 -0
  331. torchrl/record/loggers/trackio.py +163 -0
  332. torchrl/record/loggers/utils.py +78 -0
  333. torchrl/record/loggers/wandb.py +214 -0
  334. torchrl/record/recorder.py +554 -0
  335. torchrl/services/__init__.py +79 -0
  336. torchrl/services/base.py +109 -0
  337. torchrl/services/ray_service.py +453 -0
  338. torchrl/testing/__init__.py +107 -0
  339. torchrl/testing/assertions.py +179 -0
  340. torchrl/testing/dist_utils.py +122 -0
  341. torchrl/testing/env_creators.py +227 -0
  342. torchrl/testing/env_helper.py +35 -0
  343. torchrl/testing/gym_helpers.py +156 -0
  344. torchrl/testing/llm_mocks.py +119 -0
  345. torchrl/testing/mocking_classes.py +2720 -0
  346. torchrl/testing/modules.py +295 -0
  347. torchrl/testing/mp_helpers.py +15 -0
  348. torchrl/testing/ray_helpers.py +293 -0
  349. torchrl/testing/utils.py +190 -0
  350. torchrl/trainers/__init__.py +42 -0
  351. torchrl/trainers/algorithms/__init__.py +11 -0
  352. torchrl/trainers/algorithms/configs/__init__.py +705 -0
  353. torchrl/trainers/algorithms/configs/collectors.py +216 -0
  354. torchrl/trainers/algorithms/configs/common.py +41 -0
  355. torchrl/trainers/algorithms/configs/data.py +308 -0
  356. torchrl/trainers/algorithms/configs/envs.py +104 -0
  357. torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
  358. torchrl/trainers/algorithms/configs/logging.py +80 -0
  359. torchrl/trainers/algorithms/configs/modules.py +570 -0
  360. torchrl/trainers/algorithms/configs/objectives.py +177 -0
  361. torchrl/trainers/algorithms/configs/trainers.py +340 -0
  362. torchrl/trainers/algorithms/configs/transforms.py +955 -0
  363. torchrl/trainers/algorithms/configs/utils.py +252 -0
  364. torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
  365. torchrl/trainers/algorithms/configs/weight_update.py +159 -0
  366. torchrl/trainers/algorithms/ppo.py +373 -0
  367. torchrl/trainers/algorithms/sac.py +308 -0
  368. torchrl/trainers/helpers/__init__.py +40 -0
  369. torchrl/trainers/helpers/collectors.py +416 -0
  370. torchrl/trainers/helpers/envs.py +573 -0
  371. torchrl/trainers/helpers/logger.py +33 -0
  372. torchrl/trainers/helpers/losses.py +132 -0
  373. torchrl/trainers/helpers/models.py +658 -0
  374. torchrl/trainers/helpers/replay_buffer.py +59 -0
  375. torchrl/trainers/helpers/trainers.py +301 -0
  376. torchrl/trainers/trainers.py +2052 -0
  377. torchrl/weight_update/__init__.py +33 -0
  378. torchrl/weight_update/_distributed.py +749 -0
  379. torchrl/weight_update/_mp.py +624 -0
  380. torchrl/weight_update/_noupdate.py +102 -0
  381. torchrl/weight_update/_ray.py +1032 -0
  382. torchrl/weight_update/_rpc.py +284 -0
  383. torchrl/weight_update/_shared.py +891 -0
  384. torchrl/weight_update/llm/__init__.py +32 -0
  385. torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
  386. torchrl/weight_update/llm/vllm_nccl.py +710 -0
  387. torchrl/weight_update/utils.py +73 -0
  388. torchrl/weight_update/weight_sync_schemes.py +1244 -0
  389. torchrl-0.11.0.dist-info/LICENSE +21 -0
  390. torchrl-0.11.0.dist-info/METADATA +1307 -0
  391. torchrl-0.11.0.dist-info/RECORD +394 -0
  392. torchrl-0.11.0.dist-info/WHEEL +5 -0
  393. torchrl-0.11.0.dist-info/entry_points.txt +2 -0
  394. torchrl-0.11.0.dist-info/top_level.txt +7 -0
@@ -0,0 +1,683 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ #
+ # This source code is licensed under the MIT license found in the
+ # LICENSE file in the root directory of this source tree.
+ from __future__ import annotations
+
+ import math
+ from dataclasses import dataclass
+ from numbers import Number
+
+ import torch
+ from tensordict import TensorDict, TensorDictBase, TensorDictParams
+ from tensordict.nn import composite_lp_aggregate, dispatch, TensorDictModule
+ from tensordict.utils import NestedKey
+ from torch import Tensor
+
+ from torchrl.data.tensor_specs import Composite
+ from torchrl.envs.utils import ExplorationType, set_exploration_type, step_mdp
+ from torchrl.objectives.common import LossModule
+ from torchrl.objectives.utils import (
+     _cache_values,
+     _GAMMA_LMBDA_DEPREC_ERROR,
+     _reduce,
+     _vmap_func,
+     default_value_kwargs,
+     distance_loss,
+     ValueEstimators,
+ )
+ from torchrl.objectives.value import (
+     TD0Estimator,
+     TD1Estimator,
+     TDLambdaEstimator,
+     ValueEstimatorBase,
+ )
+
+
+ class REDQLoss(LossModule):
+     """REDQ Loss module.
+
+     REDQ (RANDOMIZED ENSEMBLED DOUBLE Q-LEARNING: LEARNING FAST WITHOUT A MODEL,
+     https://openreview.net/pdf?id=AY8zfZm0tDd) generalizes the idea of using an ensemble of Q-value functions to
+     train a SAC-like algorithm.
+
+     Args:
+         actor_network (TensorDictModule): the actor to be trained
+         qvalue_network (TensorDictModule): a single Q-value network or a list of Q-value networks.
+             If a single instance of `qvalue_network` is provided, it will be duplicated ``num_qvalue_nets``
+             times. If a list of modules is passed, their
+             parameters will be stacked unless they share the same identity (in which case
+             the original parameter will be expanded).
+
+             .. warning:: When a list of parameters is passed, it will **not** be compared against the policy parameters
+               and all the parameters will be considered as untied.
+
+     Keyword Args:
+         num_qvalue_nets (int, optional): Number of Q-value networks to be trained.
+             Default is ``10``.
+         sub_sample_len (int, optional): number of Q-value networks to be
+             subsampled to evaluate the next state value.
+             Default is ``2``.
+         loss_function (str, optional): loss function to be used for the Q-value.
+             Can be one of ``"smooth_l1"``, ``"l2"`` or
+             ``"l1"``. Default is ``"smooth_l1"``.
+         alpha_init (:obj:`float`, optional): initial entropy multiplier.
+             Default is ``1.0``.
+         min_alpha (:obj:`float`, optional): min value of alpha.
+             Default is ``0.1``.
+         max_alpha (:obj:`float`, optional): max value of alpha.
+             Default is ``10.0``.
+         action_spec (TensorSpec, optional): the action tensor spec. If not provided
+             and the target entropy is ``"auto"``, it will be retrieved from
+             the actor.
+         fixed_alpha (bool, optional): whether alpha should be trained to match
+             a target entropy. Default is ``False``.
+         target_entropy (Union[str, Number], optional): Target entropy for the
+             stochastic policy. Default is "auto", where target entropy is
+             computed as :obj:`-prod(n_actions)`.
+         delay_qvalue (bool, optional): Whether to separate the target Q value
+             networks from the Q value networks used
+             for data collection. Default is ``True``.
+         gSDE (bool, optional): Knowing if gSDE is used is necessary to create
+             random noise variables.
+             Default is ``False``.
+         priority_key (str, optional): [Deprecated, use .set_keys() instead] Key where to write the priority value
+             for prioritized replay buffers. Default is
+             ``"td_error"``.
+         separate_losses (bool, optional): if ``True``, shared parameters between
+             policy and critic will only be trained on the policy loss.
+             Defaults to ``False``, i.e., gradients are propagated to shared
+             parameters for both policy and critic losses.
+         reduction (str, optional): Specifies the reduction to apply to the output:
+             ``"none"`` | ``"mean"`` | ``"sum"``. ``"none"``: no reduction will be applied,
+             ``"mean"``: the sum of the output will be divided by the number of
+             elements in the output, ``"sum"``: the output will be summed. Default: ``"mean"``.
+         deactivate_vmap (bool, optional): whether to deactivate vmap calls and replace them with a plain for loop.
+             Defaults to ``False``.
+
+     Examples:
+         >>> import torch
+         >>> from torch import nn
+         >>> from torchrl.data import Bounded
+         >>> from torchrl.modules.distributions import NormalParamExtractor, TanhNormal
+         >>> from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
+         >>> from torchrl.modules.tensordict_module.common import SafeModule
+         >>> from torchrl.objectives.redq import REDQLoss
+         >>> from tensordict import TensorDict
+         >>> n_act, n_obs = 4, 3
+         >>> spec = Bounded(-torch.ones(n_act), torch.ones(n_act), (n_act,))
+         >>> net = nn.Sequential(nn.Linear(n_obs, 2 * n_act), NormalParamExtractor())
+         >>> module = SafeModule(net, in_keys=["observation"], out_keys=["loc", "scale"])
+         >>> actor = ProbabilisticActor(
+         ...     module=module,
+         ...     in_keys=["loc", "scale"],
+         ...     spec=spec,
+         ...     distribution_class=TanhNormal)
+         >>> class ValueClass(nn.Module):
+         ...     def __init__(self):
+         ...         super().__init__()
+         ...         self.linear = nn.Linear(n_obs + n_act, 1)
+         ...     def forward(self, obs, act):
+         ...         return self.linear(torch.cat([obs, act], -1))
+         >>> module = ValueClass()
+         >>> qvalue = ValueOperator(
+         ...     module=module,
+         ...     in_keys=['observation', 'action'])
+         >>> loss = REDQLoss(actor, qvalue)
+         >>> batch = [2, ]
+         >>> action = spec.rand(batch)
+         >>> data = TensorDict({
+         ...     "observation": torch.randn(*batch, n_obs),
+         ...     "action": action,
+         ...     ("next", "done"): torch.zeros(*batch, 1, dtype=torch.bool),
+         ...     ("next", "terminated"): torch.zeros(*batch, 1, dtype=torch.bool),
+         ...     ("next", "reward"): torch.randn(*batch, 1),
+         ...     ("next", "observation"): torch.randn(*batch, n_obs),
+         ... }, batch)
+         >>> loss(data)
+         TensorDict(
+             fields={
+                 action_log_prob_actor: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 alpha: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 entropy: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 loss_actor: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 loss_alpha: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 loss_qvalue: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 next.state_value: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 state_action_value_actor: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+                 target_value: Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False)},
+             batch_size=torch.Size([]),
+             device=None,
+             is_shared=False)
+
+     This class is compatible with non-tensordict based modules too and can be
+     used without resorting to any tensordict-related primitive. In this case,
+     the expected keyword arguments are:
+     ``["action", "next_reward", "next_done", "next_terminated"]`` + in_keys of the actor and qvalue network.
+     The return value is a tuple of tensors in the following order:
+     ``["loss_actor", "loss_qvalue", "loss_alpha", "alpha", "entropy", "state_action_value_actor", "action_log_prob_actor", "next.state_value", "target_value",]``.
+
+     Examples:
+         >>> import torch
+         >>> from torch import nn
+         >>> from torchrl.data import Bounded
+         >>> from torchrl.modules.distributions import NormalParamExtractor, TanhNormal
+         >>> from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
+         >>> from torchrl.modules.tensordict_module.common import SafeModule
+         >>> from torchrl.objectives.redq import REDQLoss
+         >>> n_act, n_obs = 4, 3
+         >>> spec = Bounded(-torch.ones(n_act), torch.ones(n_act), (n_act,))
+         >>> net = nn.Sequential(nn.Linear(n_obs, 2 * n_act), NormalParamExtractor())
+         >>> module = SafeModule(net, in_keys=["observation"], out_keys=["loc", "scale"])
+         >>> actor = ProbabilisticActor(
+         ...     module=module,
+         ...     in_keys=["loc", "scale"],
+         ...     spec=spec,
+         ...     distribution_class=TanhNormal)
+         >>> class ValueClass(nn.Module):
+         ...     def __init__(self):
+         ...         super().__init__()
+         ...         self.linear = nn.Linear(n_obs + n_act, 1)
+         ...     def forward(self, obs, act):
+         ...         return self.linear(torch.cat([obs, act], -1))
+         >>> module = ValueClass()
+         >>> qvalue = ValueOperator(
+         ...     module=module,
+         ...     in_keys=['observation', 'action'])
+         >>> loss = REDQLoss(actor, qvalue)
+         >>> batch = [2, ]
+         >>> action = spec.rand(batch)
+         >>> # filter output keys to "loss_actor" and "loss_qvalue"
+         >>> _ = loss.select_out_keys("loss_actor", "loss_qvalue")
+         >>> loss_actor, loss_qvalue = loss(
+         ...     observation=torch.randn(*batch, n_obs),
+         ...     action=action,
+         ...     next_done=torch.zeros(*batch, 1, dtype=torch.bool),
+         ...     next_terminated=torch.zeros(*batch, 1, dtype=torch.bool),
+         ...     next_reward=torch.randn(*batch, 1),
+         ...     next_observation=torch.randn(*batch, n_obs))
+         >>> loss_actor.backward()
+
+     """
+
+     @dataclass
+     class _AcceptedKeys:
+         """Maintains default values for all configurable tensordict keys.
+
+         This class defines which tensordict keys can be set using '.set_keys(key_name=key_value)' and their
+         default values
+
+         Attributes:
+             value (NestedKey): The input tensordict key where the state value is expected.
+                 Will be used for the underlying value estimator. Defaults to ``"state_value"``.
+             action (NestedKey): The input tensordict key where the action is expected. Defaults to ``"action"``.
+             sample_log_prob (NestedKey): The input tensordict key where the
+                 sample log probability is expected.
+                 Defaults to ``"sample_log_prob"`` when :func:`~tensordict.nn.composite_lp_aggregate` returns `True`,
+                 `"action_log_prob"` otherwise.
+             priority (NestedKey): The input tensordict key where the target
+                 priority is written to. Defaults to ``"td_error"``.
+             state_action_value (NestedKey): The input tensordict key where the
+                 state action value is expected. Defaults to ``"state_action_value"``.
+             reward (NestedKey): The input tensordict key where the reward is expected.
+                 Will be used for the underlying value estimator. Defaults to ``"reward"``.
+             done (NestedKey): The key in the input TensorDict that indicates
+                 whether a trajectory is done. Will be used for the underlying value estimator.
+                 Defaults to ``"done"``.
+             terminated (NestedKey): The key in the input TensorDict that indicates
+                 whether a trajectory is terminated. Will be used for the underlying value estimator.
+                 Defaults to ``"terminated"``.
+         """
+
+         action: NestedKey = "action"
+         value: NestedKey = "state_value"
+         sample_log_prob: NestedKey | None = None
+         priority: NestedKey = "td_error"
+         state_action_value: NestedKey = "state_action_value"
+         reward: NestedKey = "reward"
+         done: NestedKey = "done"
+         terminated: NestedKey = "terminated"
+
+         def __post_init__(self):
+             if self.sample_log_prob is None:
+                 if composite_lp_aggregate(nowarn=True):
+                     self.sample_log_prob = "sample_log_prob"
+                 else:
+                     self.sample_log_prob = "action_log_prob"
+
+     tensor_keys: _AcceptedKeys
+     default_keys = _AcceptedKeys
+     delay_actor: bool = False
+     default_value_estimator = ValueEstimators.TD0
+     out_keys = [
+         "loss_actor",
+         "loss_qvalue",
+         "loss_alpha",
+         "alpha",
+         "entropy",
+         "state_action_value_actor",
+         "action_log_prob_actor",
+         "next.state_value",
+         "target_value",
+     ]
+
+     actor_network: TensorDictModule
+     qvalue_network: TensorDictModule
+     actor_network_params: TensorDictParams
+     qvalue_network_params: TensorDictParams
+     target_actor_network_params: TensorDictParams
+     target_qvalue_network_params: TensorDictParams
+
+     def __init__(
+         self,
+         actor_network: TensorDictModule,
+         qvalue_network: TensorDictModule | list[TensorDictModule],
+         *,
+         num_qvalue_nets: int = 10,
+         sub_sample_len: int = 2,
+         loss_function: str = "smooth_l1",
+         alpha_init: float = 1.0,
+         min_alpha: float = 0.1,
+         max_alpha: float = 10.0,
+         action_spec=None,
+         fixed_alpha: bool = False,
+         target_entropy: str | Number = "auto",
+         delay_qvalue: bool = True,
+         gSDE: bool = False,
+         gamma: float | None = None,
+         priority_key: str | None = None,
+         separate_losses: bool = False,
+         reduction: str | None = None,
+         deactivate_vmap: bool = False,
+     ):
+         if reduction is None:
+             reduction = "mean"
+         super().__init__()
+         self._in_keys = None
+         self._set_deprecated_ctor_keys(priority_key=priority_key)
+
+         self.convert_to_functional(
+             actor_network,
+             "actor_network",
+             create_target_params=self.delay_actor,
+         )
+
+         # let's make sure that actor_network has `return_log_prob` set to True
+         self.actor_network.return_log_prob = True
+         self.deactivate_vmap = deactivate_vmap
+         if separate_losses:
+             # we want to make sure there are no duplicates in the params: the
+             # params of critic must be refs to actor if they're shared
+             policy_params = list(actor_network.parameters())
+         else:
+             policy_params = None
+         self.delay_qvalue = delay_qvalue
+         self.convert_to_functional(
+             qvalue_network,
+             "qvalue_network",
+             num_qvalue_nets,
+             create_target_params=self.delay_qvalue,
+             compare_against=policy_params,
+         )
+         self.num_qvalue_nets = num_qvalue_nets
+         self.sub_sample_len = max(1, min(sub_sample_len, num_qvalue_nets - 1))
+         self.loss_function = loss_function
+
+         try:
+             device = next(self.parameters()).device
+         except AttributeError:
+             device = getattr(torch, "get_default_device", lambda: torch.device("cpu"))()
+
+         self.register_buffer("alpha_init", torch.tensor(alpha_init, device=device))
+         self.register_buffer(
+             "min_log_alpha", torch.tensor(min_alpha, device=device).log()
+         )
+         self.register_buffer(
+             "max_log_alpha", torch.tensor(max_alpha, device=device).log()
+         )
+         self.fixed_alpha = fixed_alpha
+         if fixed_alpha:
+             self.register_buffer(
+                 "log_alpha", torch.tensor(math.log(alpha_init), device=device)
+             )
+         else:
+             self.register_parameter(
+                 "log_alpha",
+                 torch.nn.Parameter(
+                     torch.tensor(
+                         math.log(alpha_init), device=device, requires_grad=True
+                     )
+                 ),
+             )
+
+         self._target_entropy = target_entropy
+         self._action_spec = action_spec
+         self.target_entropy_buffer = None
+         self.reduction = reduction
+         self.gSDE = gSDE
+         if gamma is not None:
+             raise TypeError(_GAMMA_LMBDA_DEPREC_ERROR)
+         self._make_vmap()
+
+     def _make_vmap(self):
+         self._vmap_qvalue_network00 = _vmap_func(
+             self.qvalue_network,
+             randomness=self.vmap_randomness,
+             pseudo_vmap=self.deactivate_vmap,
+         )
+         self._vmap_getdist = _vmap_func(
+             self.actor_network,
+             func="get_dist_params",
+             randomness=self.vmap_randomness,
+             pseudo_vmap=self.deactivate_vmap,
+         )
+
+     @property
+     def target_entropy(self):
+         target_entropy = self.target_entropy_buffer
+         if target_entropy is None:
+             delattr(self, "target_entropy_buffer")
+             target_entropy = self._target_entropy
+             action_spec = self._action_spec
+             actor_network = self.actor_network
+             device = next(self.parameters()).device
+             if target_entropy == "auto":
+                 action_spec = (
+                     action_spec
+                     if action_spec is not None
+                     else getattr(actor_network, "spec", None)
+                 )
+                 if action_spec is None:
+                     raise RuntimeError(
+                         "Cannot infer the dimensionality of the action. Consider providing "
+                         "the target entropy explicitly or provide the spec of the "
+                         "action tensor in the actor network."
+                     )
+                 if not isinstance(action_spec, Composite):
+                     action_spec = Composite({self.tensor_keys.action: action_spec})
+                 if (
+                     isinstance(self.tensor_keys.action, tuple)
+                     and len(self.tensor_keys.action) > 1
+                 ):
+                     action_container_shape = action_spec[
+                         self.tensor_keys.action[:-1]
+                     ].shape
+                 else:
+                     action_container_shape = action_spec.shape
+                 target_entropy = -float(
+                     action_spec[self.tensor_keys.action]
+                     .shape[len(action_container_shape) :]
+                     .numel()
+                 )
+             self.register_buffer(
+                 "target_entropy_buffer", torch.tensor(target_entropy, device=device)
+             )
+             return self.target_entropy_buffer
+         return target_entropy
+
+     def _forward_value_estimator_keys(self, **kwargs) -> None:
+         if self._value_estimator is not None:
+             self._value_estimator.set_keys(
+                 value=self._tensor_keys.value,
+                 reward=self.tensor_keys.reward,
+                 done=self.tensor_keys.done,
+                 terminated=self.tensor_keys.terminated,
+             )
+         self._set_in_keys()
+
+     @property
+     def alpha(self):
+         with torch.no_grad():
+             return self.log_alpha.clamp(self.min_log_alpha, self.max_log_alpha).exp()
+
+     def _set_in_keys(self):
+         keys = [
+             self.tensor_keys.action,
+             self.tensor_keys.sample_log_prob,
+             ("next", self.tensor_keys.reward),
+             ("next", self.tensor_keys.done),
+             ("next", self.tensor_keys.terminated),
+             *self.actor_network.in_keys,
+             *[("next", key) for key in self.actor_network.in_keys],
+             *self.qvalue_network.in_keys,
+         ]
+         self._in_keys = list(set(keys))
+
+     @property
+     def in_keys(self):
+         if self._in_keys is None:
+             self._set_in_keys()
+         return self._in_keys
+
+     @in_keys.setter
+     def in_keys(self, values):
+         self._in_keys = values
+
+     @property
+     @_cache_values
+     def _cached_detach_qvalue_network_params(self):
+         return self.qvalue_network_params.detach()
+
+     def _qvalue_params_cat(self, selected_q_params):
+         qvalue_params = torch.cat(
+             [
+                 self._cached_detach_qvalue_network_params,
+                 selected_q_params,
+                 self.qvalue_network_params,
+             ],
+             0,
+         )
+         return qvalue_params
+
+     @dispatch
+     def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
+         obs_keys = self.actor_network.in_keys
+         tensordict_select = tensordict.select(
+             "next", *obs_keys, self.tensor_keys.action, strict=False
+         )
+         # We need to copy because select does not copy sub-tds
+         tensordict_select = tensordict_select.copy()
+
+         selected_models_idx = torch.randperm(self.num_qvalue_nets)[
+             : self.sub_sample_len
+         ].sort()[0]
+         selected_q_params = self.target_qvalue_network_params[selected_models_idx]
+
+         actor_params = torch.stack(
+             [self.actor_network_params, self.target_actor_network_params], 0
+         )
+
+         tensordict_actor_grad = tensordict_select.select(
+             *obs_keys, strict=False
+         )  # to avoid overwriting keys
+         next_td_actor = step_mdp(tensordict_select).select(
+             *self.actor_network.in_keys, strict=False
+         )  # next_observation ->
+         tensordict_actor = torch.stack([tensordict_actor_grad, next_td_actor], 0)
+
+         with set_exploration_type(ExplorationType.RANDOM):
+             if self.gSDE:
+                 tensordict_actor.set(
+                     "_eps_gSDE",
+                     torch.zeros(tensordict_actor.shape, device=tensordict_actor.device),
+                 )
+             # vmap doesn't support sampling, so we take it out from the vmap
+             td_params = self._vmap_getdist(
+                 tensordict_actor,
+                 actor_params,
+             )
+             sample_key = self.tensor_keys.action
+             sample_key_lp = self.tensor_keys.sample_log_prob
+             tensordict_actor_dist = self.actor_network.build_dist_from_params(td_params)
+             tensordict_actor.set(sample_key, tensordict_actor_dist.rsample())
+             tensordict_actor.set(
+                 sample_key_lp,
+                 tensordict_actor_dist.log_prob(tensordict_actor.get(sample_key)),
+             )
+
+         # repeat tensordict_actor to match the qvalue size
+         _actor_loss_td = (
+             tensordict_actor[0]
+             .select(*self.qvalue_network.in_keys)
+             .expand(self.num_qvalue_nets, *tensordict_actor[0].batch_size)
+         )  # for actor loss
+         _qval_td = tensordict_select.select(*self.qvalue_network.in_keys).expand(
+             self.num_qvalue_nets,
+             *tensordict_select.select(*self.qvalue_network.in_keys).batch_size,
+         )  # for qvalue loss
+         _next_val_td = (
+             tensordict_actor[1]
+             .select(*self.qvalue_network.in_keys)
+             .expand(self.sub_sample_len, *tensordict_actor[1].batch_size)
+         )  # for next value estimation
+         tensordict_qval = torch.cat(
+             [
+                 _actor_loss_td,
+                 _next_val_td,
+                 _qval_td,
+             ],
+             0,
+         )
+
+         # cat params
+         tensordict_qval = self._vmap_qvalue_network00(
+             tensordict_qval,
+             self._qvalue_params_cat(selected_q_params),
+         )
+
+         state_action_value = tensordict_qval.get(
+             self.tensor_keys.state_action_value
+         ).squeeze(-1)
+         (
+             state_action_value_actor,
+             next_state_action_value_qvalue,
+             state_action_value_qvalue,
+         ) = state_action_value.split(
+             [self.num_qvalue_nets, self.sub_sample_len, self.num_qvalue_nets],
+             dim=0,
+         )
+         sample_log_prob = tensordict_actor.get(
+             self.tensor_keys.sample_log_prob
+         ).squeeze(-1)
+         (
+             action_log_prob_actor,
+             next_action_log_prob_qvalue,
+         ) = sample_log_prob.unbind(0)
+
+         loss_actor = -(state_action_value_actor - self.alpha * action_log_prob_actor)
+
+         next_state_value = (
+             next_state_action_value_qvalue - self.alpha * next_action_log_prob_qvalue
+         )
+         next_state_value = next_state_value.min(0)[0]
+
+         tensordict_select.set(
+             ("next", self.tensor_keys.value), next_state_value.unsqueeze(-1)
+         )
+         target_value = self.value_estimator.value_estimate(tensordict_select).squeeze(
+             -1
+         )
+
+         pred_val = state_action_value_qvalue
+         td_error = (pred_val - target_value).pow(2)
+         loss_qval = distance_loss(
+             pred_val,
+             target_value.expand_as(pred_val),
+             loss_function=self.loss_function,
+         )
+
+         tensordict.set(self.tensor_keys.priority, td_error.detach().max(0)[0])
+
+         loss_alpha = self._loss_alpha(sample_log_prob)
+         if not loss_qval.shape == loss_actor.shape:
+             raise RuntimeError(
+                 f"QVal and actor loss have different shape: {loss_qval.shape} and {loss_actor.shape}"
+             )
+         td_out = TensorDict(
+             {
+                 "loss_actor": loss_actor,
+                 "loss_qvalue": loss_qval,
+                 "loss_alpha": loss_alpha,
+                 "alpha": self.alpha.detach(),
+                 "entropy": -sample_log_prob.detach().mean(),
+                 "state_action_value_actor": state_action_value_actor.detach(),
+                 "action_log_prob_actor": action_log_prob_actor.detach(),
+                 "next.state_value": next_state_value.detach(),
+                 "target_value": target_value.detach(),
+             },
+             [],
+         )
+         td_out = td_out.named_apply(
+             lambda name, value: _reduce(value, reduction=self.reduction)
+             if name.startswith("loss_")
+             else value,
+         )
+         self._clear_weakrefs(
+             tensordict,
+             td_out,
+             "actor_network_params",
+             "qvalue_network_params",
+             "target_actor_network_params",
+             "target_qvalue_network_params",
+         )
+         return td_out
+
+     def _loss_alpha(self, log_pi: Tensor) -> Tensor:
+         if torch.is_grad_enabled() and not log_pi.requires_grad:
+             raise RuntimeError(
+                 "expected log_pi to require gradient for the alpha loss"
+             )
+         if self.target_entropy is not None:
+             # we can compute this loss even if log_alpha is not a parameter
+             alpha_loss = -self._safe_log_alpha.exp() * (
+                 log_pi.detach() + self.target_entropy
+             )
+         else:
+             # placeholder
+             alpha_loss = torch.zeros_like(log_pi)
+         return alpha_loss
+
+     @property
+     def _safe_log_alpha(self):
+         log_alpha = self.log_alpha
+         with torch.no_grad():
+             log_alpha_clamp = log_alpha.clamp(self.min_log_alpha, self.max_log_alpha)
+         log_alpha_det = log_alpha.detach()
+         return log_alpha - log_alpha_det + log_alpha_clamp
+
+     def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams):
+         if value_type is None:
+             value_type = self.default_value_estimator
+
+         # Handle ValueEstimatorBase instance or class
+         if isinstance(value_type, ValueEstimatorBase) or (
+             isinstance(value_type, type) and issubclass(value_type, ValueEstimatorBase)
+         ):
+             return LossModule.make_value_estimator(self, value_type, **hyperparams)
+
+         self.value_type = value_type
+         hp = dict(default_value_kwargs(value_type))
+         if hasattr(self, "gamma"):
+             hp["gamma"] = self.gamma
+         hp.update(hyperparams)
+         # we do not need a value network because the next state value is already passed
+         if value_type == ValueEstimators.TD1:
+             self._value_estimator = TD1Estimator(value_network=None, **hp)
+         elif value_type == ValueEstimators.TD0:
+             self._value_estimator = TD0Estimator(value_network=None, **hp)
+         elif value_type == ValueEstimators.GAE:
+             raise NotImplementedError(
+                 f"Value type {value_type} is not implemented for loss {type(self)}."
+             )
+         elif value_type == ValueEstimators.TDLambda:
+             self._value_estimator = TDLambdaEstimator(value_network=None, **hp)
+         else:
+             raise NotImplementedError(f"Unknown value type {value_type}")
+
+         tensor_keys = {
+             "value": self.tensor_keys.value,
+             "reward": self.tensor_keys.reward,
+             "done": self.tensor_keys.done,
+             "terminated": self.tensor_keys.terminated,
+         }
+         self._value_estimator.set_keys(**tensor_keys)
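
The make_value_estimator method defined at the end of the file lets users swap the default TD(0) estimator for another bootstrapping scheme after the loss is constructed. A minimal sketch of how this might be used, assuming actor and qvalue modules built as in the class docstring examples (the gamma/lmbda values below are illustrative, not defaults from the package):

    from torchrl.objectives.redq import REDQLoss
    from torchrl.objectives.utils import ValueEstimators

    loss = REDQLoss(actor, qvalue)  # actor/qvalue assumed built as in the docstring examples
    # Replace the default TD(0) estimator with TD(lambda); gamma (discount) and
    # lmbda (trace decay) are forwarded to TDLambdaEstimator via **hyperparams.
    loss.make_value_estimator(ValueEstimators.TDLambda, gamma=0.99, lmbda=0.95)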