torchrl-0.11.0-cp314-cp314t-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. benchmarks/benchmark_batched_envs.py +104 -0
  2. benchmarks/conftest.py +91 -0
  3. benchmarks/ecosystem/gym_env_throughput.py +321 -0
  4. benchmarks/ecosystem/vmas_rllib_vs_torchrl_sampling_performance.py +231 -0
  5. benchmarks/requirements.txt +7 -0
  6. benchmarks/storage/benchmark_sample_latency_over_rpc.py +193 -0
  7. benchmarks/test_collectors_benchmark.py +240 -0
  8. benchmarks/test_compressed_storage_benchmark.py +145 -0
  9. benchmarks/test_envs_benchmark.py +133 -0
  10. benchmarks/test_llm.py +101 -0
  11. benchmarks/test_non_tensor_env_benchmark.py +70 -0
  12. benchmarks/test_objectives_benchmarks.py +1199 -0
  13. benchmarks/test_replaybuffer_benchmark.py +254 -0
  14. sota-check/README.md +35 -0
  15. sota-implementations/README.md +142 -0
  16. sota-implementations/a2c/README.md +39 -0
  17. sota-implementations/a2c/a2c_atari.py +291 -0
  18. sota-implementations/a2c/a2c_mujoco.py +273 -0
  19. sota-implementations/a2c/utils_atari.py +240 -0
  20. sota-implementations/a2c/utils_mujoco.py +160 -0
  21. sota-implementations/bandits/README.md +7 -0
  22. sota-implementations/bandits/dqn.py +126 -0
  23. sota-implementations/cql/cql_offline.py +198 -0
  24. sota-implementations/cql/cql_online.py +249 -0
  25. sota-implementations/cql/discrete_cql_offline.py +180 -0
  26. sota-implementations/cql/discrete_cql_online.py +227 -0
  27. sota-implementations/cql/utils.py +471 -0
  28. sota-implementations/crossq/crossq.py +271 -0
  29. sota-implementations/crossq/utils.py +320 -0
  30. sota-implementations/ddpg/ddpg.py +231 -0
  31. sota-implementations/ddpg/utils.py +325 -0
  32. sota-implementations/decision_transformer/dt.py +163 -0
  33. sota-implementations/decision_transformer/lamb.py +167 -0
  34. sota-implementations/decision_transformer/online_dt.py +178 -0
  35. sota-implementations/decision_transformer/utils.py +562 -0
  36. sota-implementations/discrete_sac/discrete_sac.py +243 -0
  37. sota-implementations/discrete_sac/utils.py +324 -0
  38. sota-implementations/dqn/README.md +30 -0
  39. sota-implementations/dqn/dqn_atari.py +272 -0
  40. sota-implementations/dqn/dqn_cartpole.py +236 -0
  41. sota-implementations/dqn/utils_atari.py +132 -0
  42. sota-implementations/dqn/utils_cartpole.py +90 -0
  43. sota-implementations/dreamer/README.md +129 -0
  44. sota-implementations/dreamer/dreamer.py +586 -0
  45. sota-implementations/dreamer/dreamer_utils.py +1107 -0
  46. sota-implementations/expert-iteration/README.md +352 -0
  47. sota-implementations/expert-iteration/ei_utils.py +770 -0
  48. sota-implementations/expert-iteration/expert-iteration-async.py +512 -0
  49. sota-implementations/expert-iteration/expert-iteration-sync.py +508 -0
  50. sota-implementations/expert-iteration/requirements_gsm8k.txt +13 -0
  51. sota-implementations/expert-iteration/requirements_ifeval.txt +16 -0
  52. sota-implementations/gail/gail.py +327 -0
  53. sota-implementations/gail/gail_utils.py +68 -0
  54. sota-implementations/gail/ppo_utils.py +157 -0
  55. sota-implementations/grpo/README.md +273 -0
  56. sota-implementations/grpo/grpo-async.py +437 -0
  57. sota-implementations/grpo/grpo-sync.py +435 -0
  58. sota-implementations/grpo/grpo_utils.py +843 -0
  59. sota-implementations/grpo/requirements_gsm8k.txt +11 -0
  60. sota-implementations/grpo/requirements_ifeval.txt +16 -0
  61. sota-implementations/impala/README.md +33 -0
  62. sota-implementations/impala/impala_multi_node_ray.py +292 -0
  63. sota-implementations/impala/impala_multi_node_submitit.py +284 -0
  64. sota-implementations/impala/impala_single_node.py +261 -0
  65. sota-implementations/impala/utils.py +184 -0
  66. sota-implementations/iql/discrete_iql.py +230 -0
  67. sota-implementations/iql/iql_offline.py +164 -0
  68. sota-implementations/iql/iql_online.py +225 -0
  69. sota-implementations/iql/utils.py +437 -0
  70. sota-implementations/multiagent/README.md +74 -0
  71. sota-implementations/multiagent/iql.py +237 -0
  72. sota-implementations/multiagent/maddpg_iddpg.py +266 -0
  73. sota-implementations/multiagent/mappo_ippo.py +267 -0
  74. sota-implementations/multiagent/qmix_vdn.py +271 -0
  75. sota-implementations/multiagent/sac.py +337 -0
  76. sota-implementations/multiagent/utils/__init__.py +4 -0
  77. sota-implementations/multiagent/utils/logging.py +151 -0
  78. sota-implementations/multiagent/utils/utils.py +43 -0
  79. sota-implementations/ppo/README.md +29 -0
  80. sota-implementations/ppo/ppo_atari.py +305 -0
  81. sota-implementations/ppo/ppo_mujoco.py +293 -0
  82. sota-implementations/ppo/utils_atari.py +238 -0
  83. sota-implementations/ppo/utils_mujoco.py +152 -0
  84. sota-implementations/ppo_trainer/train.py +21 -0
  85. sota-implementations/redq/README.md +7 -0
  86. sota-implementations/redq/redq.py +199 -0
  87. sota-implementations/redq/utils.py +1060 -0
  88. sota-implementations/sac/sac-async.py +266 -0
  89. sota-implementations/sac/sac.py +239 -0
  90. sota-implementations/sac/utils.py +381 -0
  91. sota-implementations/sac_trainer/train.py +16 -0
  92. sota-implementations/td3/td3.py +254 -0
  93. sota-implementations/td3/utils.py +319 -0
  94. sota-implementations/td3_bc/td3_bc.py +177 -0
  95. sota-implementations/td3_bc/utils.py +251 -0
  96. torchrl/.dylibs/libc++.1.0.dylib +0 -0
  97. torchrl/__init__.py +144 -0
  98. torchrl/_extension.py +74 -0
  99. torchrl/_torchrl.cpython-314t-darwin.so +0 -0
  100. torchrl/_utils.py +1431 -0
  101. torchrl/collectors/__init__.py +48 -0
  102. torchrl/collectors/_base.py +1058 -0
  103. torchrl/collectors/_constants.py +88 -0
  104. torchrl/collectors/_multi_async.py +324 -0
  105. torchrl/collectors/_multi_base.py +1805 -0
  106. torchrl/collectors/_multi_sync.py +464 -0
  107. torchrl/collectors/_runner.py +581 -0
  108. torchrl/collectors/_single.py +2009 -0
  109. torchrl/collectors/_single_async.py +259 -0
  110. torchrl/collectors/collectors.py +62 -0
  111. torchrl/collectors/distributed/__init__.py +32 -0
  112. torchrl/collectors/distributed/default_configs.py +133 -0
  113. torchrl/collectors/distributed/generic.py +1306 -0
  114. torchrl/collectors/distributed/ray.py +1092 -0
  115. torchrl/collectors/distributed/rpc.py +1006 -0
  116. torchrl/collectors/distributed/sync.py +731 -0
  117. torchrl/collectors/distributed/utils.py +160 -0
  118. torchrl/collectors/llm/__init__.py +10 -0
  119. torchrl/collectors/llm/base.py +494 -0
  120. torchrl/collectors/llm/ray_collector.py +275 -0
  121. torchrl/collectors/llm/utils.py +36 -0
  122. torchrl/collectors/llm/weight_update/__init__.py +10 -0
  123. torchrl/collectors/llm/weight_update/vllm.py +348 -0
  124. torchrl/collectors/llm/weight_update/vllm_v2.py +311 -0
  125. torchrl/collectors/utils.py +433 -0
  126. torchrl/collectors/weight_update.py +591 -0
  127. torchrl/csrc/numpy_utils.h +38 -0
  128. torchrl/csrc/pybind.cpp +27 -0
  129. torchrl/csrc/segment_tree.h +458 -0
  130. torchrl/csrc/torch_utils.h +34 -0
  131. torchrl/csrc/utils.cpp +48 -0
  132. torchrl/csrc/utils.h +31 -0
  133. torchrl/data/__init__.py +187 -0
  134. torchrl/data/datasets/__init__.py +58 -0
  135. torchrl/data/datasets/atari_dqn.py +878 -0
  136. torchrl/data/datasets/common.py +281 -0
  137. torchrl/data/datasets/d4rl.py +489 -0
  138. torchrl/data/datasets/d4rl_infos.py +187 -0
  139. torchrl/data/datasets/gen_dgrl.py +375 -0
  140. torchrl/data/datasets/minari_data.py +643 -0
  141. torchrl/data/datasets/openml.py +177 -0
  142. torchrl/data/datasets/openx.py +798 -0
  143. torchrl/data/datasets/roboset.py +363 -0
  144. torchrl/data/datasets/utils.py +11 -0
  145. torchrl/data/datasets/vd4rl.py +432 -0
  146. torchrl/data/llm/__init__.py +34 -0
  147. torchrl/data/llm/dataset.py +491 -0
  148. torchrl/data/llm/history.py +1378 -0
  149. torchrl/data/llm/prompt.py +198 -0
  150. torchrl/data/llm/reward.py +225 -0
  151. torchrl/data/llm/topk.py +186 -0
  152. torchrl/data/llm/utils.py +543 -0
  153. torchrl/data/map/__init__.py +21 -0
  154. torchrl/data/map/hash.py +185 -0
  155. torchrl/data/map/query.py +204 -0
  156. torchrl/data/map/tdstorage.py +363 -0
  157. torchrl/data/map/tree.py +1434 -0
  158. torchrl/data/map/utils.py +103 -0
  159. torchrl/data/postprocs/__init__.py +8 -0
  160. torchrl/data/postprocs/postprocs.py +391 -0
  161. torchrl/data/replay_buffers/__init__.py +99 -0
  162. torchrl/data/replay_buffers/checkpointers.py +622 -0
  163. torchrl/data/replay_buffers/ray_buffer.py +292 -0
  164. torchrl/data/replay_buffers/replay_buffers.py +2376 -0
  165. torchrl/data/replay_buffers/samplers.py +2578 -0
  166. torchrl/data/replay_buffers/scheduler.py +265 -0
  167. torchrl/data/replay_buffers/storages.py +2412 -0
  168. torchrl/data/replay_buffers/utils.py +1042 -0
  169. torchrl/data/replay_buffers/writers.py +781 -0
  170. torchrl/data/tensor_specs.py +7101 -0
  171. torchrl/data/utils.py +334 -0
  172. torchrl/envs/__init__.py +265 -0
  173. torchrl/envs/async_envs.py +1105 -0
  174. torchrl/envs/batched_envs.py +3093 -0
  175. torchrl/envs/common.py +4241 -0
  176. torchrl/envs/custom/__init__.py +11 -0
  177. torchrl/envs/custom/chess.py +617 -0
  178. torchrl/envs/custom/llm.py +214 -0
  179. torchrl/envs/custom/pendulum.py +401 -0
  180. torchrl/envs/custom/san_moves.txt +29274 -0
  181. torchrl/envs/custom/tictactoeenv.py +288 -0
  182. torchrl/envs/env_creator.py +263 -0
  183. torchrl/envs/gym_like.py +752 -0
  184. torchrl/envs/libs/__init__.py +68 -0
  185. torchrl/envs/libs/_gym_utils.py +326 -0
  186. torchrl/envs/libs/brax.py +846 -0
  187. torchrl/envs/libs/dm_control.py +544 -0
  188. torchrl/envs/libs/envpool.py +447 -0
  189. torchrl/envs/libs/gym.py +2239 -0
  190. torchrl/envs/libs/habitat.py +138 -0
  191. torchrl/envs/libs/isaac_lab.py +87 -0
  192. torchrl/envs/libs/isaacgym.py +203 -0
  193. torchrl/envs/libs/jax_utils.py +166 -0
  194. torchrl/envs/libs/jumanji.py +963 -0
  195. torchrl/envs/libs/meltingpot.py +599 -0
  196. torchrl/envs/libs/openml.py +153 -0
  197. torchrl/envs/libs/openspiel.py +652 -0
  198. torchrl/envs/libs/pettingzoo.py +1042 -0
  199. torchrl/envs/libs/procgen.py +351 -0
  200. torchrl/envs/libs/robohive.py +429 -0
  201. torchrl/envs/libs/smacv2.py +645 -0
  202. torchrl/envs/libs/unity_mlagents.py +891 -0
  203. torchrl/envs/libs/utils.py +147 -0
  204. torchrl/envs/libs/vmas.py +813 -0
  205. torchrl/envs/llm/__init__.py +63 -0
  206. torchrl/envs/llm/chat.py +730 -0
  207. torchrl/envs/llm/datasets/README.md +4 -0
  208. torchrl/envs/llm/datasets/__init__.py +17 -0
  209. torchrl/envs/llm/datasets/gsm8k.py +353 -0
  210. torchrl/envs/llm/datasets/ifeval.py +274 -0
  211. torchrl/envs/llm/envs.py +789 -0
  212. torchrl/envs/llm/libs/README.md +3 -0
  213. torchrl/envs/llm/libs/__init__.py +8 -0
  214. torchrl/envs/llm/libs/mlgym.py +869 -0
  215. torchrl/envs/llm/reward/__init__.py +10 -0
  216. torchrl/envs/llm/reward/gsm8k.py +324 -0
  217. torchrl/envs/llm/reward/ifeval/README.md +13 -0
  218. torchrl/envs/llm/reward/ifeval/__init__.py +10 -0
  219. torchrl/envs/llm/reward/ifeval/_instructions.py +1667 -0
  220. torchrl/envs/llm/reward/ifeval/_instructions_main.py +131 -0
  221. torchrl/envs/llm/reward/ifeval/_instructions_registry.py +100 -0
  222. torchrl/envs/llm/reward/ifeval/_instructions_util.py +1677 -0
  223. torchrl/envs/llm/reward/ifeval/_scorer.py +454 -0
  224. torchrl/envs/llm/transforms/__init__.py +55 -0
  225. torchrl/envs/llm/transforms/browser.py +292 -0
  226. torchrl/envs/llm/transforms/dataloading.py +859 -0
  227. torchrl/envs/llm/transforms/format.py +73 -0
  228. torchrl/envs/llm/transforms/kl.py +1544 -0
  229. torchrl/envs/llm/transforms/policy_version.py +189 -0
  230. torchrl/envs/llm/transforms/reason.py +323 -0
  231. torchrl/envs/llm/transforms/tokenizer.py +321 -0
  232. torchrl/envs/llm/transforms/tools.py +1955 -0
  233. torchrl/envs/model_based/__init__.py +9 -0
  234. torchrl/envs/model_based/common.py +180 -0
  235. torchrl/envs/model_based/dreamer.py +112 -0
  236. torchrl/envs/transforms/__init__.py +147 -0
  237. torchrl/envs/transforms/functional.py +48 -0
  238. torchrl/envs/transforms/gym_transforms.py +203 -0
  239. torchrl/envs/transforms/module.py +341 -0
  240. torchrl/envs/transforms/r3m.py +372 -0
  241. torchrl/envs/transforms/ray_service.py +663 -0
  242. torchrl/envs/transforms/rb_transforms.py +214 -0
  243. torchrl/envs/transforms/transforms.py +11835 -0
  244. torchrl/envs/transforms/utils.py +94 -0
  245. torchrl/envs/transforms/vc1.py +307 -0
  246. torchrl/envs/transforms/vecnorm.py +845 -0
  247. torchrl/envs/transforms/vip.py +407 -0
  248. torchrl/envs/utils.py +1718 -0
  249. torchrl/envs/vec_envs.py +11 -0
  250. torchrl/modules/__init__.py +206 -0
  251. torchrl/modules/distributions/__init__.py +73 -0
  252. torchrl/modules/distributions/continuous.py +830 -0
  253. torchrl/modules/distributions/discrete.py +908 -0
  254. torchrl/modules/distributions/truncated_normal.py +187 -0
  255. torchrl/modules/distributions/utils.py +233 -0
  256. torchrl/modules/llm/__init__.py +62 -0
  257. torchrl/modules/llm/backends/__init__.py +65 -0
  258. torchrl/modules/llm/backends/vllm/__init__.py +94 -0
  259. torchrl/modules/llm/backends/vllm/_models.py +46 -0
  260. torchrl/modules/llm/backends/vllm/base.py +72 -0
  261. torchrl/modules/llm/backends/vllm/vllm_async.py +2075 -0
  262. torchrl/modules/llm/backends/vllm/vllm_plugin.py +22 -0
  263. torchrl/modules/llm/backends/vllm/vllm_sync.py +446 -0
  264. torchrl/modules/llm/backends/vllm/vllm_utils.py +129 -0
  265. torchrl/modules/llm/policies/__init__.py +28 -0
  266. torchrl/modules/llm/policies/common.py +1809 -0
  267. torchrl/modules/llm/policies/transformers_wrapper.py +2756 -0
  268. torchrl/modules/llm/policies/vllm_wrapper.py +2241 -0
  269. torchrl/modules/llm/utils.py +23 -0
  270. torchrl/modules/mcts/__init__.py +21 -0
  271. torchrl/modules/mcts/scores.py +579 -0
  272. torchrl/modules/models/__init__.py +86 -0
  273. torchrl/modules/models/batchrenorm.py +119 -0
  274. torchrl/modules/models/decision_transformer.py +179 -0
  275. torchrl/modules/models/exploration.py +731 -0
  276. torchrl/modules/models/llm.py +156 -0
  277. torchrl/modules/models/model_based.py +596 -0
  278. torchrl/modules/models/models.py +1712 -0
  279. torchrl/modules/models/multiagent.py +1067 -0
  280. torchrl/modules/models/recipes/impala.py +185 -0
  281. torchrl/modules/models/utils.py +162 -0
  282. torchrl/modules/planners/__init__.py +10 -0
  283. torchrl/modules/planners/cem.py +228 -0
  284. torchrl/modules/planners/common.py +73 -0
  285. torchrl/modules/planners/mppi.py +265 -0
  286. torchrl/modules/tensordict_module/__init__.py +89 -0
  287. torchrl/modules/tensordict_module/actors.py +2457 -0
  288. torchrl/modules/tensordict_module/common.py +529 -0
  289. torchrl/modules/tensordict_module/exploration.py +814 -0
  290. torchrl/modules/tensordict_module/probabilistic.py +321 -0
  291. torchrl/modules/tensordict_module/rnn.py +1639 -0
  292. torchrl/modules/tensordict_module/sequence.py +132 -0
  293. torchrl/modules/tensordict_module/world_models.py +34 -0
  294. torchrl/modules/utils/__init__.py +38 -0
  295. torchrl/modules/utils/mappings.py +9 -0
  296. torchrl/modules/utils/utils.py +89 -0
  297. torchrl/objectives/__init__.py +78 -0
  298. torchrl/objectives/a2c.py +659 -0
  299. torchrl/objectives/common.py +753 -0
  300. torchrl/objectives/cql.py +1346 -0
  301. torchrl/objectives/crossq.py +710 -0
  302. torchrl/objectives/ddpg.py +453 -0
  303. torchrl/objectives/decision_transformer.py +371 -0
  304. torchrl/objectives/deprecated.py +516 -0
  305. torchrl/objectives/dqn.py +683 -0
  306. torchrl/objectives/dreamer.py +488 -0
  307. torchrl/objectives/functional.py +48 -0
  308. torchrl/objectives/gail.py +258 -0
  309. torchrl/objectives/iql.py +996 -0
  310. torchrl/objectives/llm/__init__.py +30 -0
  311. torchrl/objectives/llm/grpo.py +846 -0
  312. torchrl/objectives/llm/sft.py +482 -0
  313. torchrl/objectives/multiagent/__init__.py +8 -0
  314. torchrl/objectives/multiagent/qmixer.py +396 -0
  315. torchrl/objectives/ppo.py +1669 -0
  316. torchrl/objectives/redq.py +683 -0
  317. torchrl/objectives/reinforce.py +530 -0
  318. torchrl/objectives/sac.py +1580 -0
  319. torchrl/objectives/td3.py +570 -0
  320. torchrl/objectives/td3_bc.py +625 -0
  321. torchrl/objectives/utils.py +782 -0
  322. torchrl/objectives/value/__init__.py +28 -0
  323. torchrl/objectives/value/advantages.py +1956 -0
  324. torchrl/objectives/value/functional.py +1459 -0
  325. torchrl/objectives/value/utils.py +360 -0
  326. torchrl/record/__init__.py +17 -0
  327. torchrl/record/loggers/__init__.py +23 -0
  328. torchrl/record/loggers/common.py +48 -0
  329. torchrl/record/loggers/csv.py +226 -0
  330. torchrl/record/loggers/mlflow.py +142 -0
  331. torchrl/record/loggers/tensorboard.py +139 -0
  332. torchrl/record/loggers/trackio.py +163 -0
  333. torchrl/record/loggers/utils.py +78 -0
  334. torchrl/record/loggers/wandb.py +214 -0
  335. torchrl/record/recorder.py +554 -0
  336. torchrl/services/__init__.py +79 -0
  337. torchrl/services/base.py +109 -0
  338. torchrl/services/ray_service.py +453 -0
  339. torchrl/testing/__init__.py +107 -0
  340. torchrl/testing/assertions.py +179 -0
  341. torchrl/testing/dist_utils.py +122 -0
  342. torchrl/testing/env_creators.py +227 -0
  343. torchrl/testing/env_helper.py +35 -0
  344. torchrl/testing/gym_helpers.py +156 -0
  345. torchrl/testing/llm_mocks.py +119 -0
  346. torchrl/testing/mocking_classes.py +2720 -0
  347. torchrl/testing/modules.py +295 -0
  348. torchrl/testing/mp_helpers.py +15 -0
  349. torchrl/testing/ray_helpers.py +293 -0
  350. torchrl/testing/utils.py +190 -0
  351. torchrl/trainers/__init__.py +42 -0
  352. torchrl/trainers/algorithms/__init__.py +11 -0
  353. torchrl/trainers/algorithms/configs/__init__.py +705 -0
  354. torchrl/trainers/algorithms/configs/collectors.py +216 -0
  355. torchrl/trainers/algorithms/configs/common.py +41 -0
  356. torchrl/trainers/algorithms/configs/data.py +308 -0
  357. torchrl/trainers/algorithms/configs/envs.py +104 -0
  358. torchrl/trainers/algorithms/configs/envs_libs.py +361 -0
  359. torchrl/trainers/algorithms/configs/logging.py +80 -0
  360. torchrl/trainers/algorithms/configs/modules.py +570 -0
  361. torchrl/trainers/algorithms/configs/objectives.py +177 -0
  362. torchrl/trainers/algorithms/configs/trainers.py +340 -0
  363. torchrl/trainers/algorithms/configs/transforms.py +955 -0
  364. torchrl/trainers/algorithms/configs/utils.py +252 -0
  365. torchrl/trainers/algorithms/configs/weight_sync_schemes.py +191 -0
  366. torchrl/trainers/algorithms/configs/weight_update.py +159 -0
  367. torchrl/trainers/algorithms/ppo.py +373 -0
  368. torchrl/trainers/algorithms/sac.py +308 -0
  369. torchrl/trainers/helpers/__init__.py +40 -0
  370. torchrl/trainers/helpers/collectors.py +416 -0
  371. torchrl/trainers/helpers/envs.py +573 -0
  372. torchrl/trainers/helpers/logger.py +33 -0
  373. torchrl/trainers/helpers/losses.py +132 -0
  374. torchrl/trainers/helpers/models.py +658 -0
  375. torchrl/trainers/helpers/replay_buffer.py +59 -0
  376. torchrl/trainers/helpers/trainers.py +301 -0
  377. torchrl/trainers/trainers.py +2052 -0
  378. torchrl/weight_update/__init__.py +33 -0
  379. torchrl/weight_update/_distributed.py +749 -0
  380. torchrl/weight_update/_mp.py +624 -0
  381. torchrl/weight_update/_noupdate.py +102 -0
  382. torchrl/weight_update/_ray.py +1032 -0
  383. torchrl/weight_update/_rpc.py +284 -0
  384. torchrl/weight_update/_shared.py +891 -0
  385. torchrl/weight_update/llm/__init__.py +32 -0
  386. torchrl/weight_update/llm/vllm_double_buffer.py +370 -0
  387. torchrl/weight_update/llm/vllm_nccl.py +710 -0
  388. torchrl/weight_update/utils.py +73 -0
  389. torchrl/weight_update/weight_sync_schemes.py +1244 -0
  390. torchrl-0.11.0.dist-info/METADATA +1308 -0
  391. torchrl-0.11.0.dist-info/RECORD +395 -0
  392. torchrl-0.11.0.dist-info/WHEEL +5 -0
  393. torchrl-0.11.0.dist-info/entry_points.txt +2 -0
  394. torchrl-0.11.0.dist-info/licenses/LICENSE +21 -0
  395. torchrl-0.11.0.dist-info/top_level.txt +7 -0
sota-implementations/decision_transformer/utils.py
@@ -0,0 +1,562 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+from __future__ import annotations
+
+import os
+from pathlib import Path
+
+import torch.nn
+
+import torch.optim
+from lamb import Lamb
+from tensordict.nn import TensorDictModule
+
+from torchrl.collectors import SyncDataCollector
+from torchrl.data import (
+    LazyMemmapStorage,
+    RoundRobinWriter,
+    TensorDictReplayBuffer,
+    TensorStorage,
+)
+from torchrl.data.datasets.d4rl import D4RLExperienceReplay
+from torchrl.data.replay_buffers import RandomSampler
+from torchrl.envs import (
+    CatFrames,
+    Compose,
+    DoubleToFloat,
+    EnvCreator,
+    ExcludeTransform,
+    ObservationNorm,
+    ParallelEnv,
+    RandomCropTensorDict,
+    RenameTransform,
+    Reward2GoTransform,
+    RewardScaling,
+    RewardSum,
+    TargetReturn,
+    TensorDictPrimer,
+    TransformedEnv,
+    UnsqueezeTransform,
+)
+from torchrl.envs.libs.dm_control import DMControlEnv
+from torchrl.envs.libs.gym import set_gym_backend
+from torchrl.envs.utils import ExplorationType, set_exploration_type
+from torchrl.modules import (
+    DTActor,
+    OnlineDTActor,
+    ProbabilisticActor,
+    TanhDelta,
+    TanhNormal,
+)
+
+from torchrl.objectives import DTLoss, OnlineDTLoss
+from torchrl.record import VideoRecorder
+from torchrl.record.loggers import generate_exp_name, get_logger
+from torchrl.trainers.helpers.envs import LIBS
+
+# ====================================================================
+# Environment utils
+# -----------------
+
+
+def make_base_env(env_cfg, from_pixels=False, device=None):
+    set_gym_backend(env_cfg.backend).set()
+
+    env_library = LIBS[env_cfg.library]
+    env_name = env_cfg.name
+    frame_skip = env_cfg.frame_skip
+
+    env_kwargs = {
+        "env_name": env_name,
+        "frame_skip": frame_skip,
+        "from_pixels": from_pixels,
+        "pixels_only": False,
+    }
+    if env_library is DMControlEnv:
+        env_task = env_cfg.task
+        env_kwargs.update({"task_name": env_task})
+    env = env_library(**env_kwargs, device=device)
+    return env
+
+
+def make_transformed_env(base_env, env_cfg, obs_loc, obs_std, train=False):
+    transformed_env = TransformedEnv(base_env)
+    transformed_env.append_transform(
+        RewardScaling(
+            loc=0,
+            scale=env_cfg.reward_scaling,
+            in_keys=["reward"],
+            standard_normal=False,
+        )
+    )
+    if train:
+        transformed_env.append_transform(
+            TargetReturn(
+                env_cfg.collect_target_return * env_cfg.reward_scaling,
+                out_keys=["return_to_go"],
+                mode=env_cfg.target_return_mode,
+            )
+        )
+    else:
+        transformed_env.append_transform(
+            TargetReturn(
+                env_cfg.eval_target_return * env_cfg.reward_scaling,
+                out_keys=["return_to_go"],
+                mode=env_cfg.target_return_mode,
+            )
+        )
+
+    # copy action from the input tensordict to the output
+    transformed_env.append_transform(TensorDictPrimer(base_env.full_action_spec))
+
+    transformed_env.append_transform(DoubleToFloat())
+    obsnorm = ObservationNorm(
+        loc=obs_loc, scale=obs_std, in_keys="observation", standard_normal=True
+    )
+    transformed_env.append_transform(obsnorm)
+    transformed_env.append_transform(
+        UnsqueezeTransform(
+            -2,
+            in_keys=["observation", "action", "return_to_go"],
+            out_keys=["observation_cat", "action_cat", "return_to_go_cat"],
+        )
+    )
+    transformed_env.append_transform(
+        CatFrames(
+            in_keys=["observation_cat", "action_cat", "return_to_go_cat"],
+            N=env_cfg.stacked_frames,
+            dim=-2,
+            padding="constant",
+        )
+    )
+
+    if train:
+        transformed_env.append_transform(RewardSum())
+
+    return transformed_env
+
+
+def make_parallel_env(
+    env_cfg, obs_loc, obs_std, train=False, from_pixels=False, device=None
+):
+    if train:
+        num_envs = env_cfg.num_train_envs
+    else:
+        num_envs = env_cfg.num_eval_envs
+
+    def make_env():
+        with set_gym_backend(env_cfg.backend):
+            return make_base_env(env_cfg, from_pixels=from_pixels, device="cpu")
+
+    env = make_transformed_env(
+        ParallelEnv(
+            num_envs, EnvCreator(make_env), serial_for_single=True, device=device
+        ),
+        env_cfg,
+        obs_loc,
+        obs_std,
+        train,
+    )
+    env.start()
+    return env
+
+
+def make_env(env_cfg, obs_loc, obs_std, train=False, from_pixels=False, device=None):
+    return make_parallel_env(
+        env_cfg,
+        obs_loc,
+        obs_std,
+        train=train,
+        from_pixels=from_pixels,
+        device=device,
+    )
+
+
+# ====================================================================
+# Collector and replay buffer
+# ---------------------------
+
+
+def make_collector(cfg, policy):
+    exclude_target_return = ExcludeTransform(
+        "return_to_go",
+        ("next", "return_to_go"),
+        ("next", "action"),
+        ("next", "observation"),
+        "scale",
+        "loc",
+    )
+    cat = CatFrames(
+        in_keys=["action"], out_keys=["action_cat"], N=20, dim=-2, padding="constant"
+    )
+    transforms = Compose(
+        exclude_target_return,
+        cat,
+    )
+    collector_cfg = cfg.collector
+    collector_class = SyncDataCollector
+    collector = collector_class(
+        make_env(cfg.env, train=True),
+        policy,
+        frames_per_batch=collector_cfg.frames_per_batch,
+        total_frames=collector_cfg.total_frames,
+        device=collector_cfg.devices,
+        max_frames_per_traj=collector_cfg.max_frames_per_traj,
+        postproc=transforms,
+    )
+    return collector
+
+
+def make_offline_replay_buffer(rb_cfg, reward_scaling):
+    r2g = Reward2GoTransform(
+        gamma=1.0,
+        in_keys=[("next", "reward"), "reward"],
+        out_keys=[("next", "return_to_go"), "return_to_go"],
+    )
+    reward_scale = RewardScaling(
+        loc=0,
+        scale=reward_scaling,
+        in_keys=[("next", "return_to_go"), "return_to_go"],
+        standard_normal=False,
+    )
+    crop_seq = RandomCropTensorDict(sub_seq_len=rb_cfg.stacked_frames, sample_dim=-1)
+    d2f = DoubleToFloat()
+    rename = RenameTransform(
+        in_keys=[
+            "action",
+            "observation",
+            "return_to_go",
+            ("next", "return_to_go"),
+            ("next", "observation"),
+        ],
+        out_keys=[
+            "action_cat",
+            "observation_cat",
+            "return_to_go_cat",
+            ("next", "return_to_go_cat"),
+            ("next", "observation_cat"),
+        ],
+    )
+    exclude = ExcludeTransform(
+        "terminal",
+        "info",
+        ("next", "timeout"),
+        ("next", "terminal"),
+        ("next", "observation"),
+        ("next", "info"),
+    )
+
+    transforms = Compose(
+        r2g,
+        crop_seq,
+        reward_scale,
+        d2f,
+        rename,
+        exclude,
+    )
+    data = D4RLExperienceReplay(
+        dataset_id=rb_cfg.dataset,
+        split_trajs=True,
+        batch_size=rb_cfg.batch_size,
+        sampler=RandomSampler(),  # SamplerWithoutReplacement(drop_last=False),
+        transform=None,
+        use_truncated_as_done=True,
+        direct_download=True,
+        prefetch=4,
+        writer=RoundRobinWriter(),
+        root=Path(os.environ["HOME"]) / ".cache" / "torchrl" / "data" / "d4rl",
+    )
+
+    # since we're not extending the data, adding keys can only be done via
+    # the creation of a new storage
+    data_memmap = data[:]
+    with data_memmap.unlock_():
+        data_memmap = r2g.inv(data_memmap)
+        data._storage = TensorStorage(data_memmap)
+
+    loc = data[:]["observation"].flatten(0, -2).mean(axis=0).float()
+    std = data[:]["observation"].flatten(0, -2).std(axis=0).float()
+
+    obsnorm = ObservationNorm(
+        loc=loc,
+        scale=std,
+        in_keys=["observation_cat", ("next", "observation_cat")],
+        standard_normal=True,
+    )
+    for t in transforms:
+        data.append_transform(t)
+    data.append_transform(obsnorm)
+    return data, loc, std
+
+
+def make_online_replay_buffer(offline_buffer, rb_cfg, reward_scaling=0.001):
+    r2g = Reward2GoTransform(gamma=1.0, out_keys=["return_to_go"])
+    reward_scale = RewardScaling(
+        loc=0,
+        scale=reward_scaling,
+        in_keys=["return_to_go"],
+        out_keys=["return_to_go"],
+        standard_normal=False,
+    )
+    catframes = CatFrames(
+        in_keys=["return_to_go"],
+        out_keys=["return_to_go_cat"],
+        N=rb_cfg.stacked_frames,
+        dim=-2,
+        padding="constant",
+        as_inverse=True,
+    )
+    transforms = Compose(
+        r2g,
+        reward_scale,
+        catframes,
+    )
+    storage = LazyMemmapStorage(
+        max_size=rb_cfg.capacity,
+        scratch_dir=rb_cfg.scratch_dir,
+        device=rb_cfg.device,
+    )
+
+    replay_buffer = TensorDictReplayBuffer(
+        pin_memory=False,
+        prefetch=rb_cfg.prefetch,
+        storage=storage,
+        batch_size=rb_cfg.batch_size,
+    )
+    # init buffer with offline data
+    offline_data = offline_buffer[:100000]
+    offline_data.del_("index")
+    replay_buffer.extend(offline_data.clone().detach().to_tensordict())
+    # add transforms after offline data extension to not trigger reward-to-go calculation
+    replay_buffer.append_transform(transforms)
+
+    return replay_buffer
+
+
+# ====================================================================
+# Model
+# -----
+
+
+def make_odt_model(cfg, device: torch.device | None = None) -> TensorDictModule:
+    env_cfg = cfg.env
+    proof_environment = make_transformed_env(
+        make_base_env(env_cfg), env_cfg, obs_loc=0, obs_std=1
+    )
+
+    action_spec = proof_environment.action_spec_unbatched
+    for key, value in proof_environment.observation_spec_unbatched.items():
+        if key == "observation":
+            state_dim = value.shape[-1]
+    in_keys = [
+        "observation_cat",
+        "action_cat",
+        "return_to_go_cat",
+    ]
+
+    actor_net = OnlineDTActor(
+        state_dim=state_dim,
+        action_dim=action_spec.shape[-1],
+        transformer_config=cfg.transformer,
+        device=device,
+    )
+
+    actor_module = TensorDictModule(
+        actor_net,
+        in_keys=in_keys,
+        out_keys=[
+            "loc",
+            "scale",
+        ],
+    )
+    dist_class = TanhNormal
+    dist_kwargs = {
+        "low": -torch.ones((), device=device),
+        "high": torch.ones((), device=device),
+        "tanh_loc": False,
+        "upscale": torch.full((), 5, device=device),
+        # "safe_tanh": not cfg.compile.compile,
+    }
+
+    actor = ProbabilisticActor(
+        spec=action_spec,
+        in_keys=["loc", "scale"],
+        out_keys=["action"],
+        module=actor_module,
+        distribution_class=dist_class,
+        distribution_kwargs=dist_kwargs,
+        cache_dist=False,
+        return_log_prob=False,
+    )
+
+    # init the lazy layers
+    with torch.no_grad(), set_exploration_type(ExplorationType.RANDOM):
+        td = proof_environment.rollout(max_steps=100)
+        td["action"] = td["next", "action"]
+        actor(td.to(device))
+
+    return actor
+
+
+def make_dt_model(cfg, device: torch.device | None = None):
+    env_cfg = cfg.env
+    proof_environment = make_transformed_env(
+        make_base_env(env_cfg), env_cfg, obs_loc=0, obs_std=1
+    )
+
+    action_spec = proof_environment.action_spec_unbatched
+    in_keys = [
+        "observation_cat",
+        "action_cat",
+        "return_to_go_cat",
+    ]
+
+    actor_net = DTActor(
+        state_dim=proof_environment.observation_spec_unbatched["observation"].shape[-1],
+        action_dim=action_spec.shape[-1],
+        transformer_config=cfg.transformer,
+        device=device,
+    )
+
+    actor_module = TensorDictModule(
+        actor_net,
+        in_keys=in_keys,
+        out_keys=["param"],
+    )
+    dist_class = TanhDelta
+    dist_kwargs = {
+        "low": action_spec.space.low.to(device),
+        "high": action_spec.space.high.to(device),
+        "safe": not cfg.compile.compile,
+    }
+
+    actor = ProbabilisticActor(
+        spec=action_spec.to(device),
+        in_keys=["param"],
+        out_keys=["action"],
+        module=actor_module,
+        distribution_class=dist_class,
+        distribution_kwargs=dist_kwargs,
+        cache_dist=False,
+        return_log_prob=False,
+    )
+
+    # init the lazy layers
+    with torch.no_grad(), set_exploration_type(ExplorationType.RANDOM):
+        td = proof_environment.fake_tensordict()
+        td = td.expand((100, *td.shape))
+        td["action"] = td["next", "action"]
+        actor(td.to(device))
+
+    return actor
+
+
+# ====================================================================
+# Online Decision Transformer Loss
+# ---------
+
+
+def make_odt_loss(loss_cfg, actor_network):
+    loss = OnlineDTLoss(
+        actor_network,
+        alpha_init=loss_cfg.alpha_init,
+        target_entropy=loss_cfg.target_entropy,
+    )
+    loss.set_keys(action_target="action_cat")
+    return loss
+
+
+def make_dt_loss(loss_cfg, actor_network, device: torch.device | None = None):
+    loss = DTLoss(
+        actor_network,
+        loss_function=loss_cfg.loss_function,
+        device=device,
+    )
+    loss.set_keys(action_target="action_cat")
+    return loss
+
+
+def make_odt_optimizer(optim_cfg, loss_module):
+    if optim_cfg.optimizer == "lamb":
+        dt_optimizer = Lamb(
+            loss_module.actor_network_params.flatten_keys().values(),
+            lr=torch.as_tensor(
+                optim_cfg.lr, device=next(loss_module.parameters()).device
+            ),
+            weight_decay=optim_cfg.weight_decay,
+            eps=1.0e-8,
+        )
+    elif optim_cfg.optimizer == "adam":
+        dt_optimizer = torch.optim.Adam(
+            loss_module.actor_network_params.flatten_keys().values(),
+            lr=torch.as_tensor(
+                optim_cfg.lr, device=next(loss_module.parameters()).device
+            ),
+            weight_decay=optim_cfg.weight_decay,
+            eps=1.0e-8,
+        )
+
+    scheduler = torch.optim.lr_scheduler.LambdaLR(
+        dt_optimizer, lambda steps: min((steps + 1) / optim_cfg.warmup_steps, 1)
+    )
+
+    log_temp_optimizer = torch.optim.Adam(
+        [loss_module.log_alpha],
+        lr=torch.as_tensor(1e-4, device=next(loss_module.parameters()).device),
+        betas=[0.9, 0.999],
+    )
+
+    return dt_optimizer, log_temp_optimizer, scheduler
+
+
+def make_dt_optimizer(optim_cfg, loss_module, device):
+    dt_optimizer = torch.optim.Adam(
+        loss_module.actor_network_params.flatten_keys().values(),
+        lr=torch.tensor(optim_cfg.lr, device=device),
+        weight_decay=optim_cfg.weight_decay,
+        eps=1.0e-8,
+    )
+    scheduler = torch.optim.lr_scheduler.LambdaLR(
+        dt_optimizer, lambda steps: min((steps + 1) / optim_cfg.warmup_steps, 1)
+    )
+
+    return dt_optimizer, scheduler
+
+
+# ====================================================================
+# Logging and recording
+# ---------------------
+
+
+def make_logger(cfg):
+    if not cfg.logger.backend:
+        return None
+    exp_name = generate_exp_name(cfg.logger.model_name, cfg.logger.exp_name)
+    logger = get_logger(
+        cfg.logger.backend,
+        logger_name=cfg.logger.model_name,
+        experiment_name=exp_name,
+        wandb_kwargs={
+            "config": dict(cfg),
+            "project": cfg.logger.project_name,
+            "group": cfg.logger.group_name,
+        },
+    )
+    return logger
+
+
+# ====================================================================
+# General utils
+# ---------
+
+
+def log_metrics(logger, metrics, step):
+    for metric_name, metric_value in metrics.items():
+        logger.log_scalar(metric_name, metric_value, step)
+
+
+def dump_video(module):
+    if isinstance(module, VideoRecorder):
+        module.dump()
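
For orientation, the sketch below shows how the entry script dt.py (listed above as file 32, not included in this diff) might wire these helpers together for offline Decision Transformer training. It is a minimal sketch, not the packaged script: it assumes the helpers are imported from utils.py, a Hydra config with replay_buffer, env, loss, optim and logger sections, and field names such as pretrain_gradient_steps and the config name dt_config, none of which are taken from this diff.

# Illustrative wiring only; config/field names are assumptions, not the packaged dt.py.
import hydra
import torch

from utils import (
    log_metrics,
    make_dt_loss,
    make_dt_model,
    make_dt_optimizer,
    make_logger,
    make_offline_replay_buffer,
)


@hydra.main(config_path=".", config_name="dt_config", version_base="1.1")  # name assumed
def main(cfg):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Offline D4RL data with reward-to-go, cropping, renaming and normalization transforms.
    offline_buffer, obs_loc, obs_std = make_offline_replay_buffer(
        cfg.replay_buffer, cfg.env.reward_scaling
    )

    actor = make_dt_model(cfg, device=device)
    loss_module = make_dt_loss(cfg.loss, actor, device=device)
    optimizer, scheduler = make_dt_optimizer(cfg.optim, loss_module, device)
    logger = make_logger(cfg)

    for step in range(cfg.optim.pretrain_gradient_steps):  # field name assumed
        data = offline_buffer.sample().to(device)
        loss_vals = loss_module(data)
        # torchrl loss modules return a tensordict of "loss*" entries; sum them.
        loss = sum(v for k, v in loss_vals.items() if k.startswith("loss"))
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()
        if logger is not None:
            log_metrics(logger, {"train/loss": loss.item()}, step)


if __name__ == "__main__":
    main()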