vllm-cpu-avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl

This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1712)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1511 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3206 -0
  6. vllm/_ipex_ops.py +445 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +62 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/layer.py +913 -0
  15. vllm/attention/utils/__init__.py +0 -0
  16. vllm/attention/utils/kv_sharing_utils.py +33 -0
  17. vllm/attention/utils/kv_transfer_utils.py +60 -0
  18. vllm/beam_search.py +88 -0
  19. vllm/benchmarks/__init__.py +0 -0
  20. vllm/benchmarks/datasets.py +3277 -0
  21. vllm/benchmarks/latency.py +172 -0
  22. vllm/benchmarks/lib/__init__.py +3 -0
  23. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  24. vllm/benchmarks/lib/ready_checker.py +72 -0
  25. vllm/benchmarks/lib/utils.py +79 -0
  26. vllm/benchmarks/mm_processor.py +363 -0
  27. vllm/benchmarks/serve.py +1761 -0
  28. vllm/benchmarks/startup.py +321 -0
  29. vllm/benchmarks/sweep/__init__.py +0 -0
  30. vllm/benchmarks/sweep/cli.py +41 -0
  31. vllm/benchmarks/sweep/param_sweep.py +159 -0
  32. vllm/benchmarks/sweep/plot.py +675 -0
  33. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  34. vllm/benchmarks/sweep/serve.py +450 -0
  35. vllm/benchmarks/sweep/serve_sla.py +459 -0
  36. vllm/benchmarks/sweep/server.py +114 -0
  37. vllm/benchmarks/sweep/sla_sweep.py +138 -0
  38. vllm/benchmarks/sweep/utils.py +4 -0
  39. vllm/benchmarks/throughput.py +946 -0
  40. vllm/collect_env.py +857 -0
  41. vllm/compilation/__init__.py +0 -0
  42. vllm/compilation/activation_quant_fusion.py +214 -0
  43. vllm/compilation/backends.py +840 -0
  44. vllm/compilation/base_static_graph.py +57 -0
  45. vllm/compilation/caching.py +196 -0
  46. vllm/compilation/collective_fusion.py +1224 -0
  47. vllm/compilation/compiler_interface.py +639 -0
  48. vllm/compilation/counter.py +50 -0
  49. vllm/compilation/cuda_graph.py +309 -0
  50. vllm/compilation/decorators.py +662 -0
  51. vllm/compilation/fix_functionalization.py +266 -0
  52. vllm/compilation/fusion.py +570 -0
  53. vllm/compilation/fusion_attn.py +363 -0
  54. vllm/compilation/fx_utils.py +92 -0
  55. vllm/compilation/inductor_pass.py +145 -0
  56. vllm/compilation/matcher_utils.py +454 -0
  57. vllm/compilation/monitor.py +62 -0
  58. vllm/compilation/noop_elimination.py +130 -0
  59. vllm/compilation/partition_rules.py +75 -0
  60. vllm/compilation/pass_manager.py +164 -0
  61. vllm/compilation/piecewise_backend.py +191 -0
  62. vllm/compilation/post_cleanup.py +21 -0
  63. vllm/compilation/qk_norm_rope_fusion.py +244 -0
  64. vllm/compilation/rocm_aiter_fusion.py +401 -0
  65. vllm/compilation/sequence_parallelism.py +368 -0
  66. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  67. vllm/compilation/vllm_inductor_pass.py +180 -0
  68. vllm/compilation/wrapper.py +329 -0
  69. vllm/config/__init__.py +112 -0
  70. vllm/config/attention.py +114 -0
  71. vllm/config/cache.py +233 -0
  72. vllm/config/compilation.py +1149 -0
  73. vllm/config/device.py +75 -0
  74. vllm/config/ec_transfer.py +110 -0
  75. vllm/config/kv_events.py +56 -0
  76. vllm/config/kv_transfer.py +119 -0
  77. vllm/config/load.py +124 -0
  78. vllm/config/lora.py +102 -0
  79. vllm/config/model.py +2026 -0
  80. vllm/config/model_arch.py +57 -0
  81. vllm/config/multimodal.py +247 -0
  82. vllm/config/observability.py +157 -0
  83. vllm/config/parallel.py +703 -0
  84. vllm/config/pooler.py +188 -0
  85. vllm/config/profiler.py +199 -0
  86. vllm/config/scheduler.py +298 -0
  87. vllm/config/speculative.py +656 -0
  88. vllm/config/speech_to_text.py +39 -0
  89. vllm/config/structured_outputs.py +78 -0
  90. vllm/config/utils.py +374 -0
  91. vllm/config/vllm.py +1487 -0
  92. vllm/connections.py +189 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +301 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +43 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +509 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +303 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +346 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  106. vllm/distributed/device_communicators/pynccl.py +386 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  113. vllm/distributed/device_communicators/symm_mem.py +156 -0
  114. vllm/distributed/device_communicators/xpu_communicator.py +98 -0
  115. vllm/distributed/ec_transfer/__init__.py +14 -0
  116. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  117. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  118. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  119. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  120. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  121. vllm/distributed/eplb/__init__.py +3 -0
  122. vllm/distributed/eplb/async_worker.py +115 -0
  123. vllm/distributed/eplb/eplb_state.py +1192 -0
  124. vllm/distributed/eplb/policy/__init__.py +19 -0
  125. vllm/distributed/eplb/policy/abstract.py +43 -0
  126. vllm/distributed/eplb/policy/default.py +376 -0
  127. vllm/distributed/eplb/rebalance_execute.py +699 -0
  128. vllm/distributed/kv_events.py +505 -0
  129. vllm/distributed/kv_transfer/README.md +29 -0
  130. vllm/distributed/kv_transfer/__init__.py +20 -0
  131. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  132. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  133. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  134. vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  142. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  159. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  160. vllm/distributed/parallel_state.py +1809 -0
  161. vllm/distributed/utils.py +545 -0
  162. vllm/engine/__init__.py +0 -0
  163. vllm/engine/arg_utils.py +2137 -0
  164. vllm/engine/async_llm_engine.py +6 -0
  165. vllm/engine/llm_engine.py +6 -0
  166. vllm/engine/protocol.py +194 -0
  167. vllm/entrypoints/__init__.py +0 -0
  168. vllm/entrypoints/anthropic/__init__.py +0 -0
  169. vllm/entrypoints/anthropic/protocol.py +162 -0
  170. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  171. vllm/entrypoints/api_server.py +186 -0
  172. vllm/entrypoints/chat_utils.py +1912 -0
  173. vllm/entrypoints/cli/__init__.py +19 -0
  174. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/base.py +25 -0
  176. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  177. vllm/entrypoints/cli/benchmark/main.py +57 -0
  178. vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  180. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  181. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  182. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  183. vllm/entrypoints/cli/collect_env.py +38 -0
  184. vllm/entrypoints/cli/main.py +79 -0
  185. vllm/entrypoints/cli/openai.py +260 -0
  186. vllm/entrypoints/cli/run_batch.py +68 -0
  187. vllm/entrypoints/cli/serve.py +253 -0
  188. vllm/entrypoints/cli/types.py +29 -0
  189. vllm/entrypoints/constants.py +12 -0
  190. vllm/entrypoints/context.py +898 -0
  191. vllm/entrypoints/grpc_server.py +531 -0
  192. vllm/entrypoints/launcher.py +175 -0
  193. vllm/entrypoints/llm.py +1807 -0
  194. vllm/entrypoints/logger.py +86 -0
  195. vllm/entrypoints/openai/__init__.py +0 -0
  196. vllm/entrypoints/openai/api_server.py +1390 -0
  197. vllm/entrypoints/openai/cli_args.py +320 -0
  198. vllm/entrypoints/openai/orca_metrics.py +120 -0
  199. vllm/entrypoints/openai/parser/__init__.py +0 -0
  200. vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
  201. vllm/entrypoints/openai/parser/responses_parser.py +176 -0
  202. vllm/entrypoints/openai/protocol.py +2566 -0
  203. vllm/entrypoints/openai/run_batch.py +635 -0
  204. vllm/entrypoints/openai/serving_chat.py +1897 -0
  205. vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
  206. vllm/entrypoints/openai/serving_completion.py +740 -0
  207. vllm/entrypoints/openai/serving_engine.py +1612 -0
  208. vllm/entrypoints/openai/serving_models.py +309 -0
  209. vllm/entrypoints/openai/serving_responses.py +2552 -0
  210. vllm/entrypoints/openai/serving_transcription.py +168 -0
  211. vllm/entrypoints/openai/speech_to_text.py +711 -0
  212. vllm/entrypoints/openai/utils.py +49 -0
  213. vllm/entrypoints/pooling/__init__.py +16 -0
  214. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  215. vllm/entrypoints/pooling/classify/api_router.py +48 -0
  216. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  217. vllm/entrypoints/pooling/classify/serving.py +233 -0
  218. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  219. vllm/entrypoints/pooling/embed/api_router.py +65 -0
  220. vllm/entrypoints/pooling/embed/conftest.py +28 -0
  221. vllm/entrypoints/pooling/embed/protocol.py +217 -0
  222. vllm/entrypoints/pooling/embed/serving.py +684 -0
  223. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  224. vllm/entrypoints/pooling/pooling/api_router.py +62 -0
  225. vllm/entrypoints/pooling/pooling/protocol.py +146 -0
  226. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  227. vllm/entrypoints/pooling/score/__init__.py +0 -0
  228. vllm/entrypoints/pooling/score/api_router.py +147 -0
  229. vllm/entrypoints/pooling/score/protocol.py +146 -0
  230. vllm/entrypoints/pooling/score/serving.py +511 -0
  231. vllm/entrypoints/renderer.py +411 -0
  232. vllm/entrypoints/responses_utils.py +218 -0
  233. vllm/entrypoints/sagemaker/__init__.py +4 -0
  234. vllm/entrypoints/sagemaker/routes.py +118 -0
  235. vllm/entrypoints/score_utils.py +271 -0
  236. vllm/entrypoints/serve/__init__.py +94 -0
  237. vllm/entrypoints/serve/cache/__init__.py +0 -0
  238. vllm/entrypoints/serve/cache/api_router.py +61 -0
  239. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  240. vllm/entrypoints/serve/disagg/api_router.py +109 -0
  241. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  242. vllm/entrypoints/serve/disagg/serving.py +285 -0
  243. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  244. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  245. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  246. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  247. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  248. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  249. vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
  250. vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
  251. vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
  252. vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
  253. vllm/entrypoints/serve/lora/__init__.py +0 -0
  254. vllm/entrypoints/serve/lora/api_router.py +70 -0
  255. vllm/entrypoints/serve/profile/__init__.py +0 -0
  256. vllm/entrypoints/serve/profile/api_router.py +46 -0
  257. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  258. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  259. vllm/entrypoints/serve/rpc/__init__.py +0 -0
  260. vllm/entrypoints/serve/rpc/api_router.py +61 -0
  261. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  262. vllm/entrypoints/serve/sleep/api_router.py +56 -0
  263. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  264. vllm/entrypoints/serve/tokenize/api_router.py +112 -0
  265. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +187 -0
  268. vllm/entrypoints/tool_server.py +234 -0
  269. vllm/entrypoints/utils.py +336 -0
  270. vllm/env_override.py +402 -0
  271. vllm/envs.py +1791 -0
  272. vllm/exceptions.py +36 -0
  273. vllm/forward_context.py +375 -0
  274. vllm/grpc/__init__.py +17 -0
  275. vllm/grpc/compile_protos.py +94 -0
  276. vllm/grpc/vllm_engine.proto +195 -0
  277. vllm/grpc/vllm_engine_pb2.py +77 -0
  278. vllm/grpc/vllm_engine_pb2.pyi +213 -0
  279. vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
  280. vllm/inputs/__init__.py +44 -0
  281. vllm/inputs/data.py +359 -0
  282. vllm/inputs/parse.py +147 -0
  283. vllm/inputs/preprocess.py +716 -0
  284. vllm/logger.py +303 -0
  285. vllm/logging_utils/__init__.py +13 -0
  286. vllm/logging_utils/dump_input.py +83 -0
  287. vllm/logging_utils/formatter.py +127 -0
  288. vllm/logging_utils/lazy.py +20 -0
  289. vllm/logging_utils/log_time.py +34 -0
  290. vllm/logits_process.py +121 -0
  291. vllm/logprobs.py +206 -0
  292. vllm/lora/__init__.py +0 -0
  293. vllm/lora/layers/__init__.py +43 -0
  294. vllm/lora/layers/base.py +66 -0
  295. vllm/lora/layers/base_linear.py +172 -0
  296. vllm/lora/layers/column_parallel_linear.py +577 -0
  297. vllm/lora/layers/fused_moe.py +739 -0
  298. vllm/lora/layers/logits_processor.py +203 -0
  299. vllm/lora/layers/replicated_linear.py +70 -0
  300. vllm/lora/layers/row_parallel_linear.py +176 -0
  301. vllm/lora/layers/utils.py +115 -0
  302. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  303. vllm/lora/lora_model.py +221 -0
  304. vllm/lora/lora_weights.py +227 -0
  305. vllm/lora/model_manager.py +858 -0
  306. vllm/lora/ops/__init__.py +0 -0
  307. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  308. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  309. vllm/lora/ops/torch_ops/__init__.py +20 -0
  310. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  311. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  312. vllm/lora/ops/triton_ops/__init__.py +21 -0
  313. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
  314. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  315. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  316. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  317. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  318. vllm/lora/ops/triton_ops/utils.py +313 -0
  319. vllm/lora/peft_helper.py +128 -0
  320. vllm/lora/punica_wrapper/__init__.py +10 -0
  321. vllm/lora/punica_wrapper/punica_base.py +493 -0
  322. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  323. vllm/lora/punica_wrapper/punica_gpu.py +413 -0
  324. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  325. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  326. vllm/lora/punica_wrapper/utils.py +150 -0
  327. vllm/lora/request.py +60 -0
  328. vllm/lora/resolver.py +88 -0
  329. vllm/lora/utils.py +281 -0
  330. vllm/lora/worker_manager.py +278 -0
  331. vllm/model_executor/__init__.py +9 -0
  332. vllm/model_executor/custom_op.py +203 -0
  333. vllm/model_executor/layers/__init__.py +0 -0
  334. vllm/model_executor/layers/activation.py +628 -0
  335. vllm/model_executor/layers/attention/__init__.py +0 -0
  336. vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
  337. vllm/model_executor/layers/attention/cross_attention.py +182 -0
  338. vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
  339. vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
  340. vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
  341. vllm/model_executor/layers/attention_layer_base.py +34 -0
  342. vllm/model_executor/layers/batch_invariant.py +1063 -0
  343. vllm/model_executor/layers/conv.py +262 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +120 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1111 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  638. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  639. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  640. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  641. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  642. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  643. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  644. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  645. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  646. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  647. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  648. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  649. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  650. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  651. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
  652. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
  653. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
  654. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  655. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  656. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
  657. vllm/model_executor/layers/fused_moe/fallback.py +127 -0
  658. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
  659. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
  660. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
  661. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  662. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
  663. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
  664. vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
  665. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
  666. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
  667. vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
  668. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
  669. vllm/model_executor/layers/fused_moe/layer.py +2169 -0
  670. vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
  671. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  672. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  673. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  674. vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
  675. vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
  676. vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
  677. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  678. vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
  679. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
  680. vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
  681. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  682. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  683. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  684. vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
  685. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
  686. vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
  687. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
  688. vllm/model_executor/layers/fused_moe/utils.py +382 -0
  689. vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
  690. vllm/model_executor/layers/kda.py +442 -0
  691. vllm/model_executor/layers/layernorm.py +451 -0
  692. vllm/model_executor/layers/lightning_attn.py +735 -0
  693. vllm/model_executor/layers/linear.py +1478 -0
  694. vllm/model_executor/layers/logits_processor.py +109 -0
  695. vllm/model_executor/layers/mamba/__init__.py +0 -0
  696. vllm/model_executor/layers/mamba/abstract.py +68 -0
  697. vllm/model_executor/layers/mamba/linear_attn.py +410 -0
  698. vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
  699. vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
  700. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  701. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  702. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  703. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  704. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  705. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  706. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  707. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  708. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  709. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  710. vllm/model_executor/layers/mamba/short_conv.py +254 -0
  711. vllm/model_executor/layers/mla.py +179 -0
  712. vllm/model_executor/layers/pooler/__init__.py +5 -0
  713. vllm/model_executor/layers/pooler/abstract.py +39 -0
  714. vllm/model_executor/layers/pooler/activations.py +162 -0
  715. vllm/model_executor/layers/pooler/common.py +32 -0
  716. vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
  717. vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
  718. vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
  719. vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
  720. vllm/model_executor/layers/pooler/special.py +128 -0
  721. vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
  722. vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
  723. vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
  724. vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
  725. vllm/model_executor/layers/quantization/__init__.py +195 -0
  726. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  727. vllm/model_executor/layers/quantization/awq.py +277 -0
  728. vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
  729. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  730. vllm/model_executor/layers/quantization/base_config.py +170 -0
  731. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  732. vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
  733. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  734. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
  735. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
  736. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
  737. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  738. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  739. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  740. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
  741. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  742. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  743. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  744. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  745. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  746. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
  747. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  748. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  749. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  750. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  751. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  752. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  753. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  754. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  755. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  756. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  757. vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
  758. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  759. vllm/model_executor/layers/quantization/experts_int8.py +209 -0
  760. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  761. vllm/model_executor/layers/quantization/fp8.py +1224 -0
  762. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  763. vllm/model_executor/layers/quantization/gguf.py +682 -0
  764. vllm/model_executor/layers/quantization/gptq.py +393 -0
  765. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  766. vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
  767. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  768. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  769. vllm/model_executor/layers/quantization/inc.py +65 -0
  770. vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
  771. vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
  772. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  773. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  774. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
  775. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  776. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  777. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  778. vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
  779. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  780. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  781. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
  782. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  783. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  784. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  785. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  786. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
  787. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  788. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  789. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  790. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
  791. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  792. vllm/model_executor/layers/quantization/modelopt.py +1665 -0
  793. vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
  794. vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
  795. vllm/model_executor/layers/quantization/petit.py +319 -0
  796. vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
  797. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  798. vllm/model_executor/layers/quantization/quark/quark.py +570 -0
  799. vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
  800. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  801. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  802. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  803. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  804. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  805. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  806. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  807. vllm/model_executor/layers/quantization/rtn.py +626 -0
  808. vllm/model_executor/layers/quantization/schema.py +90 -0
  809. vllm/model_executor/layers/quantization/torchao.py +380 -0
  810. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  811. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  812. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  996. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  997. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  998. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  999. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1000. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1001. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1002. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1003. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1004. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1005. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1006. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1007. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1008. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1009. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1010. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1011. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1012. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1013. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1014. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1015. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1016. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1017. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1018. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1019. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1020. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1021. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1022. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1023. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1024. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1025. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1026. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1027. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  1028. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
  1029. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
  1030. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
  1031. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1032. vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
  1033. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1034. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1035. vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
  1036. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
  1037. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
  1038. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1039. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1040. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1041. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1042. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1043. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1044. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1045. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1046. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1047. vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
  1048. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1049. vllm/model_executor/layers/resampler.py +283 -0
  1050. vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
  1051. vllm/model_executor/layers/rotary_embedding/base.py +282 -0
  1052. vllm/model_executor/layers/rotary_embedding/common.py +289 -0
  1053. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
  1054. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
  1055. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1056. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1057. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1058. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1059. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1060. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
  1061. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1062. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1063. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1064. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1065. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1066. vllm/model_executor/layers/utils.py +251 -0
  1067. vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
  1068. vllm/model_executor/model_loader/__init__.py +150 -0
  1069. vllm/model_executor/model_loader/base_loader.py +71 -0
  1070. vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
  1071. vllm/model_executor/model_loader/default_loader.py +304 -0
  1072. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1073. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1074. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1075. vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
  1076. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1077. vllm/model_executor/model_loader/tensorizer.py +793 -0
  1078. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1079. vllm/model_executor/model_loader/utils.py +299 -0
  1080. vllm/model_executor/model_loader/weight_utils.py +1183 -0
  1081. vllm/model_executor/models/__init__.py +44 -0
  1082. vllm/model_executor/models/adapters.py +592 -0
  1083. vllm/model_executor/models/afmoe.py +697 -0
  1084. vllm/model_executor/models/aimv2.py +248 -0
  1085. vllm/model_executor/models/apertus.py +567 -0
  1086. vllm/model_executor/models/arcee.py +428 -0
  1087. vllm/model_executor/models/arctic.py +633 -0
  1088. vllm/model_executor/models/aria.py +663 -0
  1089. vllm/model_executor/models/audioflamingo3.py +639 -0
  1090. vllm/model_executor/models/aya_vision.py +448 -0
  1091. vllm/model_executor/models/bagel.py +591 -0
  1092. vllm/model_executor/models/baichuan.py +493 -0
  1093. vllm/model_executor/models/bailing_moe.py +643 -0
  1094. vllm/model_executor/models/bamba.py +511 -0
  1095. vllm/model_executor/models/bee.py +157 -0
  1096. vllm/model_executor/models/bert.py +911 -0
  1097. vllm/model_executor/models/bert_with_rope.py +729 -0
  1098. vllm/model_executor/models/blip.py +350 -0
  1099. vllm/model_executor/models/blip2.py +736 -0
  1100. vllm/model_executor/models/bloom.py +390 -0
  1101. vllm/model_executor/models/chameleon.py +1095 -0
  1102. vllm/model_executor/models/chatglm.py +502 -0
  1103. vllm/model_executor/models/clip.py +1045 -0
  1104. vllm/model_executor/models/cohere2_vision.py +470 -0
  1105. vllm/model_executor/models/commandr.py +469 -0
  1106. vllm/model_executor/models/config.py +571 -0
  1107. vllm/model_executor/models/dbrx.py +484 -0
  1108. vllm/model_executor/models/deepencoder.py +679 -0
  1109. vllm/model_executor/models/deepseek_eagle.py +253 -0
  1110. vllm/model_executor/models/deepseek_mtp.py +447 -0
  1111. vllm/model_executor/models/deepseek_ocr.py +601 -0
  1112. vllm/model_executor/models/deepseek_v2.py +1727 -0
  1113. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1114. vllm/model_executor/models/dots1.py +566 -0
  1115. vllm/model_executor/models/dots_ocr.py +830 -0
  1116. vllm/model_executor/models/ernie45.py +53 -0
  1117. vllm/model_executor/models/ernie45_moe.py +755 -0
  1118. vllm/model_executor/models/ernie45_vl.py +1702 -0
  1119. vllm/model_executor/models/ernie45_vl_moe.py +801 -0
  1120. vllm/model_executor/models/ernie_mtp.py +278 -0
  1121. vllm/model_executor/models/exaone.py +524 -0
  1122. vllm/model_executor/models/exaone4.py +518 -0
  1123. vllm/model_executor/models/exaone_moe.py +579 -0
  1124. vllm/model_executor/models/exaone_moe_mtp.py +255 -0
  1125. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1126. vllm/model_executor/models/falcon.py +543 -0
  1127. vllm/model_executor/models/falcon_h1.py +675 -0
  1128. vllm/model_executor/models/flex_olmo.py +155 -0
  1129. vllm/model_executor/models/fuyu.py +371 -0
  1130. vllm/model_executor/models/gemma.py +425 -0
  1131. vllm/model_executor/models/gemma2.py +435 -0
  1132. vllm/model_executor/models/gemma3.py +520 -0
  1133. vllm/model_executor/models/gemma3_mm.py +664 -0
  1134. vllm/model_executor/models/gemma3n.py +1166 -0
  1135. vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
  1136. vllm/model_executor/models/gemma3n_mm.py +820 -0
  1137. vllm/model_executor/models/glm.py +24 -0
  1138. vllm/model_executor/models/glm4.py +295 -0
  1139. vllm/model_executor/models/glm4_1v.py +1823 -0
  1140. vllm/model_executor/models/glm4_moe.py +725 -0
  1141. vllm/model_executor/models/glm4_moe_mtp.py +365 -0
  1142. vllm/model_executor/models/glm4v.py +783 -0
  1143. vllm/model_executor/models/glmasr.py +1154 -0
  1144. vllm/model_executor/models/glmasr_utils.py +188 -0
  1145. vllm/model_executor/models/gpt2.py +385 -0
  1146. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1147. vllm/model_executor/models/gpt_j.py +346 -0
  1148. vllm/model_executor/models/gpt_neox.py +340 -0
  1149. vllm/model_executor/models/gpt_oss.py +745 -0
  1150. vllm/model_executor/models/granite.py +475 -0
  1151. vllm/model_executor/models/granite_speech.py +919 -0
  1152. vllm/model_executor/models/granitemoe.py +561 -0
  1153. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1154. vllm/model_executor/models/granitemoeshared.py +328 -0
  1155. vllm/model_executor/models/gritlm.py +242 -0
  1156. vllm/model_executor/models/grok1.py +803 -0
  1157. vllm/model_executor/models/h2ovl.py +554 -0
  1158. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1159. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1160. vllm/model_executor/models/hyperclovax_vision.py +1163 -0
  1161. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1162. vllm/model_executor/models/idefics3.py +734 -0
  1163. vllm/model_executor/models/interfaces.py +1180 -0
  1164. vllm/model_executor/models/interfaces_base.py +252 -0
  1165. vllm/model_executor/models/intern_vit.py +454 -0
  1166. vllm/model_executor/models/internlm2.py +451 -0
  1167. vllm/model_executor/models/internlm2_ve.py +139 -0
  1168. vllm/model_executor/models/interns1.py +828 -0
  1169. vllm/model_executor/models/interns1_vit.py +433 -0
  1170. vllm/model_executor/models/internvl.py +1436 -0
  1171. vllm/model_executor/models/iquest_loopcoder.py +595 -0
  1172. vllm/model_executor/models/isaac.py +1503 -0
  1173. vllm/model_executor/models/jais.py +397 -0
  1174. vllm/model_executor/models/jais2.py +508 -0
  1175. vllm/model_executor/models/jamba.py +599 -0
  1176. vllm/model_executor/models/jina_vl.py +145 -0
  1177. vllm/model_executor/models/kanana_v.py +756 -0
  1178. vllm/model_executor/models/keye.py +1709 -0
  1179. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1180. vllm/model_executor/models/kimi_linear.py +659 -0
  1181. vllm/model_executor/models/kimi_vl.py +577 -0
  1182. vllm/model_executor/models/lfm2.py +515 -0
  1183. vllm/model_executor/models/lfm2_moe.py +746 -0
  1184. vllm/model_executor/models/lfm2_vl.py +732 -0
  1185. vllm/model_executor/models/lightonocr.py +197 -0
  1186. vllm/model_executor/models/llama.py +724 -0
  1187. vllm/model_executor/models/llama4.py +860 -0
  1188. vllm/model_executor/models/llama4_eagle.py +225 -0
  1189. vllm/model_executor/models/llama_eagle.py +213 -0
  1190. vllm/model_executor/models/llama_eagle3.py +375 -0
  1191. vllm/model_executor/models/llava.py +879 -0
  1192. vllm/model_executor/models/llava_next.py +583 -0
  1193. vllm/model_executor/models/llava_next_video.py +467 -0
  1194. vllm/model_executor/models/llava_onevision.py +922 -0
  1195. vllm/model_executor/models/longcat_flash.py +767 -0
  1196. vllm/model_executor/models/longcat_flash_mtp.py +348 -0
  1197. vllm/model_executor/models/mamba.py +276 -0
  1198. vllm/model_executor/models/mamba2.py +288 -0
  1199. vllm/model_executor/models/medusa.py +179 -0
  1200. vllm/model_executor/models/midashenglm.py +826 -0
  1201. vllm/model_executor/models/mimo.py +188 -0
  1202. vllm/model_executor/models/mimo_mtp.py +294 -0
  1203. vllm/model_executor/models/mimo_v2_flash.py +718 -0
  1204. vllm/model_executor/models/minicpm.py +660 -0
  1205. vllm/model_executor/models/minicpm3.py +233 -0
  1206. vllm/model_executor/models/minicpm_eagle.py +386 -0
  1207. vllm/model_executor/models/minicpmo.py +768 -0
  1208. vllm/model_executor/models/minicpmv.py +1742 -0
  1209. vllm/model_executor/models/minimax_m2.py +552 -0
  1210. vllm/model_executor/models/minimax_text_01.py +1008 -0
  1211. vllm/model_executor/models/minimax_vl_01.py +395 -0
  1212. vllm/model_executor/models/mistral3.py +638 -0
  1213. vllm/model_executor/models/mistral_large_3.py +63 -0
  1214. vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
  1215. vllm/model_executor/models/mixtral.py +599 -0
  1216. vllm/model_executor/models/mllama4.py +1170 -0
  1217. vllm/model_executor/models/mlp_speculator.py +235 -0
  1218. vllm/model_executor/models/modernbert.py +458 -0
  1219. vllm/model_executor/models/module_mapping.py +74 -0
  1220. vllm/model_executor/models/molmo.py +1592 -0
  1221. vllm/model_executor/models/moonvit.py +601 -0
  1222. vllm/model_executor/models/mpt.py +335 -0
  1223. vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
  1224. vllm/model_executor/models/nemotron.py +499 -0
  1225. vllm/model_executor/models/nemotron_h.py +902 -0
  1226. vllm/model_executor/models/nemotron_nas.py +474 -0
  1227. vllm/model_executor/models/nemotron_parse.py +958 -0
  1228. vllm/model_executor/models/nemotron_vl.py +651 -0
  1229. vllm/model_executor/models/nvlm_d.py +216 -0
  1230. vllm/model_executor/models/olmo.py +412 -0
  1231. vllm/model_executor/models/olmo2.py +454 -0
  1232. vllm/model_executor/models/olmoe.py +498 -0
  1233. vllm/model_executor/models/opencua.py +262 -0
  1234. vllm/model_executor/models/openpangu.py +1378 -0
  1235. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1236. vllm/model_executor/models/opt.py +426 -0
  1237. vllm/model_executor/models/orion.py +365 -0
  1238. vllm/model_executor/models/ouro.py +507 -0
  1239. vllm/model_executor/models/ovis.py +557 -0
  1240. vllm/model_executor/models/ovis2_5.py +661 -0
  1241. vllm/model_executor/models/paddleocr_vl.py +1261 -0
  1242. vllm/model_executor/models/paligemma.py +429 -0
  1243. vllm/model_executor/models/persimmon.py +373 -0
  1244. vllm/model_executor/models/phi.py +363 -0
  1245. vllm/model_executor/models/phi3.py +18 -0
  1246. vllm/model_executor/models/phi3v.py +729 -0
  1247. vllm/model_executor/models/phi4mm.py +1250 -0
  1248. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1249. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1250. vllm/model_executor/models/phimoe.py +671 -0
  1251. vllm/model_executor/models/pixtral.py +1437 -0
  1252. vllm/model_executor/models/plamo2.py +993 -0
  1253. vllm/model_executor/models/plamo3.py +437 -0
  1254. vllm/model_executor/models/qwen.py +377 -0
  1255. vllm/model_executor/models/qwen2.py +600 -0
  1256. vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
  1257. vllm/model_executor/models/qwen2_5_vl.py +1598 -0
  1258. vllm/model_executor/models/qwen2_audio.py +478 -0
  1259. vllm/model_executor/models/qwen2_moe.py +604 -0
  1260. vllm/model_executor/models/qwen2_rm.py +120 -0
  1261. vllm/model_executor/models/qwen2_vl.py +1588 -0
  1262. vllm/model_executor/models/qwen3.py +331 -0
  1263. vllm/model_executor/models/qwen3_moe.py +752 -0
  1264. vllm/model_executor/models/qwen3_next.py +1410 -0
  1265. vllm/model_executor/models/qwen3_next_mtp.py +293 -0
  1266. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
  1267. vllm/model_executor/models/qwen3_vl.py +2120 -0
  1268. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1269. vllm/model_executor/models/qwen_vl.py +821 -0
  1270. vllm/model_executor/models/radio.py +573 -0
  1271. vllm/model_executor/models/registry.py +1218 -0
  1272. vllm/model_executor/models/roberta.py +239 -0
  1273. vllm/model_executor/models/rvl.py +107 -0
  1274. vllm/model_executor/models/seed_oss.py +492 -0
  1275. vllm/model_executor/models/siglip.py +1259 -0
  1276. vllm/model_executor/models/siglip2.py +495 -0
  1277. vllm/model_executor/models/siglip2navit.py +660 -0
  1278. vllm/model_executor/models/skyworkr1v.py +951 -0
  1279. vllm/model_executor/models/smolvlm.py +38 -0
  1280. vllm/model_executor/models/solar.py +484 -0
  1281. vllm/model_executor/models/stablelm.py +354 -0
  1282. vllm/model_executor/models/starcoder2.py +365 -0
  1283. vllm/model_executor/models/step3_text.py +554 -0
  1284. vllm/model_executor/models/step3_vl.py +1147 -0
  1285. vllm/model_executor/models/swin.py +500 -0
  1286. vllm/model_executor/models/tarsier.py +624 -0
  1287. vllm/model_executor/models/telechat2.py +153 -0
  1288. vllm/model_executor/models/teleflm.py +78 -0
  1289. vllm/model_executor/models/terratorch.py +318 -0
  1290. vllm/model_executor/models/transformers/__init__.py +127 -0
  1291. vllm/model_executor/models/transformers/base.py +523 -0
  1292. vllm/model_executor/models/transformers/causal.py +65 -0
  1293. vllm/model_executor/models/transformers/legacy.py +90 -0
  1294. vllm/model_executor/models/transformers/moe.py +329 -0
  1295. vllm/model_executor/models/transformers/multimodal.py +441 -0
  1296. vllm/model_executor/models/transformers/pooling.py +102 -0
  1297. vllm/model_executor/models/transformers/utils.py +253 -0
  1298. vllm/model_executor/models/ultravox.py +786 -0
  1299. vllm/model_executor/models/utils.py +832 -0
  1300. vllm/model_executor/models/vision.py +546 -0
  1301. vllm/model_executor/models/voxtral.py +867 -0
  1302. vllm/model_executor/models/voxtral_streaming.py +304 -0
  1303. vllm/model_executor/models/whisper.py +993 -0
  1304. vllm/model_executor/models/whisper_utils.py +299 -0
  1305. vllm/model_executor/models/zamba2.py +986 -0
  1306. vllm/model_executor/parameter.py +642 -0
  1307. vllm/model_executor/utils.py +113 -0
  1308. vllm/model_executor/warmup/__init__.py +0 -0
  1309. vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
  1310. vllm/model_executor/warmup/kernel_warmup.py +97 -0
  1311. vllm/model_inspection.py +136 -0
  1312. vllm/multimodal/__init__.py +38 -0
  1313. vllm/multimodal/audio.py +287 -0
  1314. vllm/multimodal/base.py +60 -0
  1315. vllm/multimodal/cache.py +829 -0
  1316. vllm/multimodal/evs.py +294 -0
  1317. vllm/multimodal/hasher.py +123 -0
  1318. vllm/multimodal/image.py +155 -0
  1319. vllm/multimodal/inputs.py +1027 -0
  1320. vllm/multimodal/parse.py +674 -0
  1321. vllm/multimodal/processing.py +2469 -0
  1322. vllm/multimodal/profiling.py +351 -0
  1323. vllm/multimodal/registry.py +375 -0
  1324. vllm/multimodal/utils.py +550 -0
  1325. vllm/multimodal/video.py +512 -0
  1326. vllm/outputs.py +347 -0
  1327. vllm/platforms/__init__.py +277 -0
  1328. vllm/platforms/cpu.py +423 -0
  1329. vllm/platforms/cuda.py +618 -0
  1330. vllm/platforms/interface.py +707 -0
  1331. vllm/platforms/rocm.py +586 -0
  1332. vllm/platforms/tpu.py +20 -0
  1333. vllm/platforms/xpu.py +262 -0
  1334. vllm/plugins/__init__.py +81 -0
  1335. vllm/plugins/io_processors/__init__.py +68 -0
  1336. vllm/plugins/io_processors/interface.py +77 -0
  1337. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1338. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1339. vllm/pooling_params.py +229 -0
  1340. vllm/profiler/__init__.py +0 -0
  1341. vllm/profiler/layerwise_profile.py +392 -0
  1342. vllm/profiler/utils.py +151 -0
  1343. vllm/profiler/wrapper.py +241 -0
  1344. vllm/py.typed +2 -0
  1345. vllm/ray/__init__.py +0 -0
  1346. vllm/ray/lazy_utils.py +30 -0
  1347. vllm/ray/ray_env.py +79 -0
  1348. vllm/reasoning/__init__.py +96 -0
  1349. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1350. vllm/reasoning/basic_parsers.py +175 -0
  1351. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1352. vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
  1353. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1354. vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
  1355. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1356. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1357. vllm/reasoning/holo2_reasoning_parser.py +89 -0
  1358. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1359. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1360. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1361. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1362. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1363. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1364. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1365. vllm/reasoning/step3_reasoning_parser.py +113 -0
  1366. vllm/sampling_params.py +629 -0
  1367. vllm/scalar_type.py +355 -0
  1368. vllm/scripts.py +17 -0
  1369. vllm/sequence.py +64 -0
  1370. vllm/tasks.py +13 -0
  1371. vllm/third_party/__init__.py +0 -0
  1372. vllm/third_party/pynvml.py +6140 -0
  1373. vllm/tokenizers/__init__.py +18 -0
  1374. vllm/tokenizers/deepseek_v32.py +187 -0
  1375. vllm/tokenizers/deepseek_v32_encoding.py +463 -0
  1376. vllm/tokenizers/detokenizer_utils.py +198 -0
  1377. vllm/tokenizers/grok2.py +443 -0
  1378. vllm/tokenizers/hf.py +119 -0
  1379. vllm/tokenizers/mistral.py +543 -0
  1380. vllm/tokenizers/protocol.py +123 -0
  1381. vllm/tokenizers/registry.py +238 -0
  1382. vllm/tool_parsers/__init__.py +158 -0
  1383. vllm/tool_parsers/abstract_tool_parser.py +274 -0
  1384. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1385. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1386. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1387. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1388. vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
  1389. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1390. vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
  1391. vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
  1392. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1393. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1394. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1395. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1396. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1397. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1398. vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
  1399. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1400. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1401. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1402. vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
  1403. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1404. vllm/tool_parsers/mistral_tool_parser.py +612 -0
  1405. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1406. vllm/tool_parsers/openai_tool_parser.py +111 -0
  1407. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1408. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1409. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1410. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1411. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1412. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1413. vllm/tool_parsers/utils.py +229 -0
  1414. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1415. vllm/tracing.py +135 -0
  1416. vllm/transformers_utils/__init__.py +26 -0
  1417. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1418. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1419. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1420. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1421. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1422. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1423. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1424. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1425. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1426. vllm/transformers_utils/config.py +1169 -0
  1427. vllm/transformers_utils/config_parser_base.py +20 -0
  1428. vllm/transformers_utils/configs/__init__.py +106 -0
  1429. vllm/transformers_utils/configs/afmoe.py +87 -0
  1430. vllm/transformers_utils/configs/arctic.py +216 -0
  1431. vllm/transformers_utils/configs/bagel.py +53 -0
  1432. vllm/transformers_utils/configs/chatglm.py +75 -0
  1433. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1434. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1435. vllm/transformers_utils/configs/eagle.py +90 -0
  1436. vllm/transformers_utils/configs/falcon.py +89 -0
  1437. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1438. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1439. vllm/transformers_utils/configs/isaac.py +100 -0
  1440. vllm/transformers_utils/configs/jais.py +243 -0
  1441. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1442. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1443. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1444. vllm/transformers_utils/configs/medusa.py +65 -0
  1445. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1446. vllm/transformers_utils/configs/mistral.py +263 -0
  1447. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1448. vllm/transformers_utils/configs/moonvit.py +33 -0
  1449. vllm/transformers_utils/configs/nemotron.py +220 -0
  1450. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1451. vllm/transformers_utils/configs/olmo3.py +83 -0
  1452. vllm/transformers_utils/configs/ovis.py +182 -0
  1453. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1454. vllm/transformers_utils/configs/radio.py +98 -0
  1455. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1456. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1457. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1458. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1459. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1460. vllm/transformers_utils/configs/ultravox.py +120 -0
  1461. vllm/transformers_utils/dynamic_module.py +70 -0
  1462. vllm/transformers_utils/gguf_utils.py +280 -0
  1463. vllm/transformers_utils/model_arch_config_convertor.py +402 -0
  1464. vllm/transformers_utils/processor.py +424 -0
  1465. vllm/transformers_utils/processors/__init__.py +25 -0
  1466. vllm/transformers_utils/processors/bagel.py +78 -0
  1467. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1468. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1469. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1470. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1471. vllm/transformers_utils/processors/ovis.py +453 -0
  1472. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1473. vllm/transformers_utils/repo_utils.py +287 -0
  1474. vllm/transformers_utils/runai_utils.py +102 -0
  1475. vllm/transformers_utils/s3_utils.py +95 -0
  1476. vllm/transformers_utils/tokenizer.py +19 -0
  1477. vllm/transformers_utils/utils.py +112 -0
  1478. vllm/triton_utils/__init__.py +20 -0
  1479. vllm/triton_utils/importing.py +103 -0
  1480. vllm/usage/__init__.py +0 -0
  1481. vllm/usage/usage_lib.py +278 -0
  1482. vllm/utils/__init__.py +36 -0
  1483. vllm/utils/argparse_utils.py +491 -0
  1484. vllm/utils/async_utils.py +310 -0
  1485. vllm/utils/cache.py +214 -0
  1486. vllm/utils/collection_utils.py +112 -0
  1487. vllm/utils/counter.py +45 -0
  1488. vllm/utils/deep_gemm.py +424 -0
  1489. vllm/utils/flashinfer.py +602 -0
  1490. vllm/utils/func_utils.py +236 -0
  1491. vllm/utils/gc_utils.py +151 -0
  1492. vllm/utils/hashing.py +117 -0
  1493. vllm/utils/import_utils.py +438 -0
  1494. vllm/utils/jsontree.py +158 -0
  1495. vllm/utils/math_utils.py +32 -0
  1496. vllm/utils/mem_constants.py +13 -0
  1497. vllm/utils/mem_utils.py +285 -0
  1498. vllm/utils/nccl.py +64 -0
  1499. vllm/utils/network_utils.py +331 -0
  1500. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1501. vllm/utils/platform_utils.py +59 -0
  1502. vllm/utils/profiling.py +56 -0
  1503. vllm/utils/registry.py +51 -0
  1504. vllm/utils/serial_utils.py +214 -0
  1505. vllm/utils/system_utils.py +296 -0
  1506. vllm/utils/tensor_schema.py +255 -0
  1507. vllm/utils/torch_utils.py +781 -0
  1508. vllm/v1/__init__.py +0 -0
  1509. vllm/v1/attention/__init__.py +0 -0
  1510. vllm/v1/attention/backend.py +736 -0
  1511. vllm/v1/attention/backends/__init__.py +0 -0
  1512. vllm/v1/attention/backends/cpu_attn.py +501 -0
  1513. vllm/v1/attention/backends/fa_utils.py +126 -0
  1514. vllm/v1/attention/backends/flash_attn.py +1092 -0
  1515. vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
  1516. vllm/v1/attention/backends/flashinfer.py +1713 -0
  1517. vllm/v1/attention/backends/flex_attention.py +1024 -0
  1518. vllm/v1/attention/backends/gdn_attn.py +382 -0
  1519. vllm/v1/attention/backends/linear_attn.py +77 -0
  1520. vllm/v1/attention/backends/mamba1_attn.py +28 -0
  1521. vllm/v1/attention/backends/mamba2_attn.py +256 -0
  1522. vllm/v1/attention/backends/mamba_attn.py +313 -0
  1523. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1524. vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
  1525. vllm/v1/attention/backends/mla/common.py +2156 -0
  1526. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1527. vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
  1528. vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
  1529. vllm/v1/attention/backends/mla/flashmla.py +321 -0
  1530. vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
  1531. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1532. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
  1533. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
  1534. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1535. vllm/v1/attention/backends/registry.py +258 -0
  1536. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1537. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1538. vllm/v1/attention/backends/rocm_attn.py +405 -0
  1539. vllm/v1/attention/backends/short_conv_attn.py +26 -0
  1540. vllm/v1/attention/backends/tree_attn.py +430 -0
  1541. vllm/v1/attention/backends/triton_attn.py +578 -0
  1542. vllm/v1/attention/backends/utils.py +978 -0
  1543. vllm/v1/attention/ops/__init__.py +0 -0
  1544. vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
  1545. vllm/v1/attention/ops/common.py +469 -0
  1546. vllm/v1/attention/ops/flashmla.py +254 -0
  1547. vllm/v1/attention/ops/merge_attn_states.py +47 -0
  1548. vllm/v1/attention/ops/paged_attn.py +51 -0
  1549. vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
  1550. vllm/v1/attention/ops/prefix_prefill.py +862 -0
  1551. vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  1552. vllm/v1/attention/ops/triton_decode_attention.py +709 -0
  1553. vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
  1554. vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
  1555. vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
  1556. vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
  1557. vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
  1558. vllm/v1/attention/selector.py +145 -0
  1559. vllm/v1/core/__init__.py +0 -0
  1560. vllm/v1/core/block_pool.py +489 -0
  1561. vllm/v1/core/encoder_cache_manager.py +402 -0
  1562. vllm/v1/core/kv_cache_coordinator.py +560 -0
  1563. vllm/v1/core/kv_cache_manager.py +485 -0
  1564. vllm/v1/core/kv_cache_metrics.py +96 -0
  1565. vllm/v1/core/kv_cache_utils.py +1642 -0
  1566. vllm/v1/core/sched/__init__.py +0 -0
  1567. vllm/v1/core/sched/async_scheduler.py +66 -0
  1568. vllm/v1/core/sched/interface.py +205 -0
  1569. vllm/v1/core/sched/output.py +261 -0
  1570. vllm/v1/core/sched/request_queue.py +208 -0
  1571. vllm/v1/core/sched/scheduler.py +1936 -0
  1572. vllm/v1/core/sched/utils.py +64 -0
  1573. vllm/v1/core/single_type_kv_cache_manager.py +926 -0
  1574. vllm/v1/cudagraph_dispatcher.py +183 -0
  1575. vllm/v1/engine/__init__.py +224 -0
  1576. vllm/v1/engine/async_llm.py +874 -0
  1577. vllm/v1/engine/coordinator.py +396 -0
  1578. vllm/v1/engine/core.py +1614 -0
  1579. vllm/v1/engine/core_client.py +1422 -0
  1580. vllm/v1/engine/detokenizer.py +351 -0
  1581. vllm/v1/engine/exceptions.py +18 -0
  1582. vllm/v1/engine/input_processor.py +713 -0
  1583. vllm/v1/engine/llm_engine.py +415 -0
  1584. vllm/v1/engine/logprobs.py +245 -0
  1585. vllm/v1/engine/output_processor.py +715 -0
  1586. vllm/v1/engine/parallel_sampling.py +150 -0
  1587. vllm/v1/engine/utils.py +1086 -0
  1588. vllm/v1/executor/__init__.py +6 -0
  1589. vllm/v1/executor/abstract.py +352 -0
  1590. vllm/v1/executor/multiproc_executor.py +888 -0
  1591. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1592. vllm/v1/executor/ray_executor.py +623 -0
  1593. vllm/v1/executor/ray_utils.py +468 -0
  1594. vllm/v1/executor/uniproc_executor.py +186 -0
  1595. vllm/v1/kv_cache_interface.py +485 -0
  1596. vllm/v1/kv_offload/__init__.py +0 -0
  1597. vllm/v1/kv_offload/abstract.py +161 -0
  1598. vllm/v1/kv_offload/arc_manager.py +237 -0
  1599. vllm/v1/kv_offload/backend.py +97 -0
  1600. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1601. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1602. vllm/v1/kv_offload/cpu.py +109 -0
  1603. vllm/v1/kv_offload/factory.py +58 -0
  1604. vllm/v1/kv_offload/lru_manager.py +139 -0
  1605. vllm/v1/kv_offload/mediums.py +39 -0
  1606. vllm/v1/kv_offload/spec.py +70 -0
  1607. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1608. vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
  1609. vllm/v1/kv_offload/worker/worker.py +163 -0
  1610. vllm/v1/metrics/__init__.py +0 -0
  1611. vllm/v1/metrics/loggers.py +1320 -0
  1612. vllm/v1/metrics/perf.py +1244 -0
  1613. vllm/v1/metrics/prometheus.py +82 -0
  1614. vllm/v1/metrics/ray_wrappers.py +194 -0
  1615. vllm/v1/metrics/reader.py +257 -0
  1616. vllm/v1/metrics/stats.py +440 -0
  1617. vllm/v1/outputs.py +242 -0
  1618. vllm/v1/pool/__init__.py +0 -0
  1619. vllm/v1/pool/metadata.py +124 -0
  1620. vllm/v1/request.py +281 -0
  1621. vllm/v1/sample/__init__.py +0 -0
  1622. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1623. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1624. vllm/v1/sample/logits_processor/interface.py +106 -0
  1625. vllm/v1/sample/logits_processor/state.py +165 -0
  1626. vllm/v1/sample/metadata.py +44 -0
  1627. vllm/v1/sample/ops/__init__.py +0 -0
  1628. vllm/v1/sample/ops/bad_words.py +57 -0
  1629. vllm/v1/sample/ops/logprobs.py +25 -0
  1630. vllm/v1/sample/ops/penalties.py +57 -0
  1631. vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
  1632. vllm/v1/sample/rejection_sampler.py +822 -0
  1633. vllm/v1/sample/sampler.py +319 -0
  1634. vllm/v1/sample/tpu/__init__.py +0 -0
  1635. vllm/v1/sample/tpu/metadata.py +120 -0
  1636. vllm/v1/sample/tpu/sampler.py +215 -0
  1637. vllm/v1/serial_utils.py +514 -0
  1638. vllm/v1/spec_decode/__init__.py +0 -0
  1639. vllm/v1/spec_decode/eagle.py +1346 -0
  1640. vllm/v1/spec_decode/medusa.py +73 -0
  1641. vllm/v1/spec_decode/metadata.py +66 -0
  1642. vllm/v1/spec_decode/metrics.py +225 -0
  1643. vllm/v1/spec_decode/ngram_proposer.py +281 -0
  1644. vllm/v1/spec_decode/suffix_decoding.py +95 -0
  1645. vllm/v1/spec_decode/utils.py +109 -0
  1646. vllm/v1/structured_output/__init__.py +337 -0
  1647. vllm/v1/structured_output/backend_guidance.py +291 -0
  1648. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1649. vllm/v1/structured_output/backend_outlines.py +324 -0
  1650. vllm/v1/structured_output/backend_types.py +136 -0
  1651. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1652. vllm/v1/structured_output/request.py +91 -0
  1653. vllm/v1/structured_output/utils.py +457 -0
  1654. vllm/v1/utils.py +466 -0
  1655. vllm/v1/worker/__init__.py +0 -0
  1656. vllm/v1/worker/block_table.py +343 -0
  1657. vllm/v1/worker/cp_utils.py +42 -0
  1658. vllm/v1/worker/cpu_model_runner.py +122 -0
  1659. vllm/v1/worker/cpu_worker.py +192 -0
  1660. vllm/v1/worker/dp_utils.py +240 -0
  1661. vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
  1662. vllm/v1/worker/gpu/README.md +4 -0
  1663. vllm/v1/worker/gpu/__init__.py +0 -0
  1664. vllm/v1/worker/gpu/async_utils.py +98 -0
  1665. vllm/v1/worker/gpu/attn_utils.py +183 -0
  1666. vllm/v1/worker/gpu/block_table.py +222 -0
  1667. vllm/v1/worker/gpu/buffer_utils.py +224 -0
  1668. vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
  1669. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1670. vllm/v1/worker/gpu/input_batch.py +526 -0
  1671. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1672. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1673. vllm/v1/worker/gpu/mm/__init__.py +0 -0
  1674. vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
  1675. vllm/v1/worker/gpu/model_runner.py +1005 -0
  1676. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1677. vllm/v1/worker/gpu/sample/gumbel.py +106 -0
  1678. vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
  1679. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1680. vllm/v1/worker/gpu/sample/metadata.py +79 -0
  1681. vllm/v1/worker/gpu/sample/min_p.py +58 -0
  1682. vllm/v1/worker/gpu/sample/output.py +14 -0
  1683. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1684. vllm/v1/worker/gpu/sample/sampler.py +88 -0
  1685. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1686. vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
  1687. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1688. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1689. vllm/v1/worker/gpu/states.py +282 -0
  1690. vllm/v1/worker/gpu/structured_outputs.py +100 -0
  1691. vllm/v1/worker/gpu_input_batch.py +1030 -0
  1692. vllm/v1/worker/gpu_model_runner.py +5761 -0
  1693. vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
  1694. vllm/v1/worker/gpu_worker.py +968 -0
  1695. vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
  1696. vllm/v1/worker/lora_model_runner_mixin.py +225 -0
  1697. vllm/v1/worker/tpu_input_batch.py +574 -0
  1698. vllm/v1/worker/tpu_worker.py +18 -0
  1699. vllm/v1/worker/ubatch_utils.py +112 -0
  1700. vllm/v1/worker/ubatching.py +242 -0
  1701. vllm/v1/worker/utils.py +400 -0
  1702. vllm/v1/worker/worker_base.py +372 -0
  1703. vllm/v1/worker/workspace.py +253 -0
  1704. vllm/v1/worker/xpu_model_runner.py +48 -0
  1705. vllm/v1/worker/xpu_worker.py +174 -0
  1706. vllm/version.py +39 -0
  1707. vllm/vllm_flash_attn/.gitkeep +0 -0
  1708. vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
  1709. vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
  1710. vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
  1711. vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
  1712. vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0
vllm/envs.py ADDED
@@ -0,0 +1,1791 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import functools
+ import json
+ import logging
+ import os
+ import sys
+ import tempfile
+ from collections.abc import Callable
+ from typing import TYPE_CHECKING, Any, Literal
+
+ if TYPE_CHECKING:
+     VLLM_HOST_IP: str = ""
+     VLLM_PORT: int | None = None
+     VLLM_RPC_BASE_PATH: str = tempfile.gettempdir()
+     VLLM_USE_MODELSCOPE: bool = False
+     VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60
+     VLLM_NCCL_SO_PATH: str | None = None
+     LD_LIBRARY_PATH: str | None = None
+     VLLM_ROCM_SLEEP_MEM_CHUNK_SIZE: int = 256
+     VLLM_V1_USE_PREFILL_DECODE_ATTENTION: bool = False
+     VLLM_FLASH_ATTN_VERSION: int | None = None
+     LOCAL_RANK: int = 0
+     CUDA_VISIBLE_DEVICES: str | None = None
+     VLLM_ENGINE_ITERATION_TIMEOUT_S: int = 60
+     VLLM_ENGINE_READY_TIMEOUT_S: int = 600
+     VLLM_API_KEY: str | None = None
+     VLLM_DEBUG_LOG_API_SERVER_RESPONSE: bool = False
+     S3_ACCESS_KEY_ID: str | None = None
+     S3_SECRET_ACCESS_KEY: str | None = None
+     S3_ENDPOINT_URL: str | None = None
+     VLLM_MODEL_REDIRECT_PATH: str | None = None
+     VLLM_CACHE_ROOT: str = os.path.expanduser("~/.cache/vllm")
+     VLLM_CONFIG_ROOT: str = os.path.expanduser("~/.config/vllm")
+     VLLM_USAGE_STATS_SERVER: str = "https://stats.vllm.ai"
+     VLLM_NO_USAGE_STATS: bool = False
+     VLLM_DISABLE_FLASHINFER_PREFILL: bool = False
+     VLLM_DO_NOT_TRACK: bool = False
+     VLLM_USAGE_SOURCE: str = ""
+     VLLM_CONFIGURE_LOGGING: bool = True
+     VLLM_LOGGING_LEVEL: str = "INFO"
+     VLLM_LOGGING_PREFIX: str = ""
+     VLLM_LOGGING_STREAM: str = "ext://sys.stdout"
+     VLLM_LOGGING_CONFIG_PATH: str | None = None
+     VLLM_LOGGING_COLOR: str = "auto"
+     NO_COLOR: bool = False
+     VLLM_LOG_STATS_INTERVAL: float = 10.0
+     VLLM_TRACE_FUNCTION: int = 0
+     VLLM_ATTENTION_BACKEND: str | None = None
+     VLLM_USE_FLASHINFER_SAMPLER: bool | None = None
+     VLLM_PP_LAYER_PARTITION: str | None = None
+     VLLM_CPU_KVCACHE_SPACE: int | None = 0
+     VLLM_CPU_OMP_THREADS_BIND: str = ""
+     VLLM_CPU_NUM_OF_RESERVED_CPU: int | None = None
+     VLLM_CPU_SGL_KERNEL: bool = False
+     VLLM_XLA_CACHE_PATH: str = os.path.join(VLLM_CACHE_ROOT, "xla_cache")
+     VLLM_XLA_CHECK_RECOMPILATION: bool = False
+     VLLM_FUSED_MOE_CHUNK_SIZE: int = 16 * 1024
+     VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING: bool = True
+     VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: Literal["auto", "nccl", "shm"] = "auto"
+     VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM: bool = False
+     VLLM_USE_RAY_WRAPPED_PP_COMM: bool = True
+     VLLM_XLA_USE_SPMD: bool = False
+     VLLM_WORKER_MULTIPROC_METHOD: Literal["fork", "spawn"] = "fork"
+     VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets")
+     VLLM_ASSETS_CACHE_MODEL_CLEAN: bool = False
+     VLLM_IMAGE_FETCH_TIMEOUT: int = 5
+     VLLM_VIDEO_FETCH_TIMEOUT: int = 30
+     VLLM_AUDIO_FETCH_TIMEOUT: int = 10
+     VLLM_MEDIA_URL_ALLOW_REDIRECTS: bool = True
+     VLLM_MEDIA_LOADING_THREAD_COUNT: int = 8
+     VLLM_MAX_AUDIO_CLIP_FILESIZE_MB: int = 25
+     VLLM_VIDEO_LOADER_BACKEND: str = "opencv"
+     VLLM_MEDIA_CONNECTOR: str = "http"
+     VLLM_TARGET_DEVICE: str = "cuda"
+     VLLM_MAIN_CUDA_VERSION: str = "12.9"
+     VLLM_FLOAT32_MATMUL_PRECISION: Literal["highest", "high", "medium"] = "highest"
+     MAX_JOBS: str | None = None
+     NVCC_THREADS: str | None = None
+     VLLM_USE_PRECOMPILED: bool = False
+     VLLM_SKIP_PRECOMPILED_VERSION_SUFFIX: bool = False
+     VLLM_DOCKER_BUILD_CONTEXT: bool = False
+     VLLM_KEEP_ALIVE_ON_ENGINE_DEATH: bool = False
+     CMAKE_BUILD_TYPE: Literal["Debug", "Release", "RelWithDebInfo"] | None = None
+     VERBOSE: bool = False
+     VLLM_ALLOW_LONG_MAX_MODEL_LEN: bool = False
+     VLLM_RPC_TIMEOUT: int = 10000  # ms
+     VLLM_HTTP_TIMEOUT_KEEP_ALIVE: int = 5  # seconds
+     VLLM_PLUGINS: list[str] | None = None
+     VLLM_LORA_RESOLVER_CACHE_DIR: str | None = None
+     # Deprecated env variables for profiling, kept for backward compatibility
+     # See also vllm/config/profiler.py and `--profiler-config` argument
+     VLLM_TORCH_CUDA_PROFILE: str | None = None
+     VLLM_TORCH_PROFILER_DIR: str | None = None
+     VLLM_TORCH_PROFILER_RECORD_SHAPES: str | None = None
+     VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY: str | None = None
+     VLLM_TORCH_PROFILER_DISABLE_ASYNC_LLM: str | None = None
+     VLLM_TORCH_PROFILER_WITH_STACK: str | None = None
+     VLLM_TORCH_PROFILER_WITH_FLOPS: str | None = None
+     VLLM_TORCH_PROFILER_USE_GZIP: str | None = None
+     VLLM_TORCH_PROFILER_DUMP_CUDA_TIME_TOTAL: str | None = None
+     VLLM_PROFILER_DELAY_ITERS: str | None = None
+     VLLM_PROFILER_MAX_ITERS: str | None = None
+     # End of deprecated env variables for profiling
+     VLLM_USE_AOT_COMPILE: bool = False
+     VLLM_USE_BYTECODE_HOOK: bool = False
+     VLLM_FORCE_AOT_LOAD: bool = False
+     VLLM_USE_TRITON_AWQ: bool = False
+     VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False
+     VLLM_SKIP_P2P_CHECK: bool = False
+     VLLM_DISABLED_KERNELS: list[str] = []
+     VLLM_DISABLE_PYNCCL: bool = False
+     VLLM_ROCM_USE_AITER: bool = False
+     VLLM_ROCM_USE_AITER_PAGED_ATTN: bool = False
+     VLLM_ROCM_USE_AITER_LINEAR: bool = True
+     VLLM_ROCM_USE_AITER_MOE: bool = True
+     VLLM_ROCM_USE_AITER_RMSNORM: bool = True
+     VLLM_ROCM_USE_AITER_MLA: bool = True
+     VLLM_ROCM_USE_AITER_MHA: bool = True
+     VLLM_ROCM_USE_AITER_FP4_ASM_GEMM: bool = False
+     VLLM_ROCM_USE_AITER_TRITON_ROPE: bool = False
+     VLLM_ROCM_USE_AITER_FP8BMM: bool = True
+     VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION: bool = False
+     VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS: bool = False
+     VLLM_ROCM_USE_AITER_TRITON_GEMM: bool = True
+     VLLM_ROCM_USE_SKINNY_GEMM: bool = True
+     VLLM_ROCM_FP8_PADDING: bool = True
+     VLLM_ROCM_MOE_PADDING: bool = True
+     VLLM_ROCM_CUSTOM_PAGED_ATTN: bool = True
+     VLLM_ENABLE_V1_MULTIPROCESSING: bool = True
+     VLLM_LOG_BATCHSIZE_INTERVAL: float = -1
+     VLLM_DISABLE_COMPILE_CACHE: bool = False
+     Q_SCALE_CONSTANT: int = 200
+     K_SCALE_CONSTANT: int = 200
+     V_SCALE_CONSTANT: int = 100
+     VLLM_SERVER_DEV_MODE: bool = False
+     VLLM_V1_OUTPUT_PROC_CHUNK_SIZE: int = 128
+     VLLM_MLA_DISABLE: bool = False
+     VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH: int = 32
+     VLLM_RAY_PER_WORKER_GPUS: float = 1.0
+     VLLM_RAY_BUNDLE_INDICES: str = ""
+     VLLM_CUDART_SO_PATH: str | None = None
+     VLLM_DP_RANK: int = 0
+     VLLM_DP_RANK_LOCAL: int = -1
+     VLLM_DP_SIZE: int = 1
+     VLLM_USE_STANDALONE_COMPILE: bool = True
+     VLLM_DP_MASTER_IP: str = ""
+     VLLM_DP_MASTER_PORT: int = 0
+     VLLM_MOE_DP_CHUNK_SIZE: int = 256
+     VLLM_ENABLE_MOE_DP_CHUNK: bool = True
+     VLLM_RANDOMIZE_DP_DUMMY_INPUTS: bool = False
+     VLLM_RAY_DP_PACK_STRATEGY: Literal["strict", "fill", "span"] = "strict"
+     VLLM_MARLIN_USE_ATOMIC_ADD: bool = False
+     VLLM_MARLIN_INPUT_DTYPE: Literal["int8", "fp8"] | None = None
+     VLLM_MXFP4_USE_MARLIN: bool | None = None
+     VLLM_DEEPEPLL_NVFP4_DISPATCH: bool = False
+     VLLM_V1_USE_OUTLINES_CACHE: bool = False
+     VLLM_TPU_BUCKET_PADDING_GAP: int = 0
+     VLLM_TPU_MOST_MODEL_LEN: int | None = None
+     VLLM_TPU_USING_PATHWAYS: bool = False
+     VLLM_USE_DEEP_GEMM: bool = True
+     VLLM_MOE_USE_DEEP_GEMM: bool = True
+     VLLM_USE_DEEP_GEMM_E8M0: bool = True
+     VLLM_DEEP_GEMM_WARMUP: Literal[
+         "skip",
+         "full",
+         "relax",
+     ] = "relax"
+     VLLM_USE_FUSED_MOE_GROUPED_TOPK: bool = True
+     VLLM_BLOCKSCALE_FP8_GEMM_FLASHINFER: bool = False
+     VLLM_USE_FLASHINFER_MOE_FP16: bool = False
+     VLLM_USE_FLASHINFER_MOE_FP8: bool = False
+     VLLM_USE_FLASHINFER_MOE_FP4: bool = False
+     VLLM_FLASHINFER_MOE_BACKEND: Literal["throughput", "latency", "masked_gemm"] = (
+         "latency"
+     )
+     VLLM_FLASHINFER_WORKSPACE_BUFFER_SIZE: int = 394 * 1024 * 1024
+     VLLM_XGRAMMAR_CACHE_MB: int = 0
+     VLLM_MSGPACK_ZERO_COPY_THRESHOLD: int = 256
+     VLLM_ALLOW_INSECURE_SERIALIZATION: bool = False
+     VLLM_NIXL_SIDE_CHANNEL_HOST: str = "localhost"
+     VLLM_NIXL_SIDE_CHANNEL_PORT: int = 5600
+     VLLM_MOONCAKE_BOOTSTRAP_PORT: int = 8998
+     VLLM_ALL2ALL_BACKEND: Literal[
+         "naive",
+         "pplx",
+         "deepep_high_throughput",
+         "deepep_low_latency",
+         "allgather_reducescatter",
+         "flashinfer_all2allv",
+     ] = "allgather_reducescatter"
+     VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE: int = 163840
+     VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS: int = 1
+     VLLM_SLEEP_WHEN_IDLE: bool = False
+     VLLM_MQ_MAX_CHUNK_BYTES_MB: int = 16
+     VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: int = 300
+     VLLM_KV_CACHE_LAYOUT: Literal["NHD", "HND"] | None = None
+     VLLM_COMPUTE_NANS_IN_LOGITS: bool = False
+     VLLM_USE_NVFP4_CT_EMULATIONS: bool = False
+     VLLM_ROCM_QUICK_REDUCE_QUANTIZATION: Literal[
+         "FP", "INT8", "INT6", "INT4", "NONE"
+     ] = "NONE"
+     VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16: bool = True
+     VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB: int | None = None
+     VLLM_NIXL_ABORT_REQUEST_TIMEOUT: int = 480
+     VLLM_MORIIO_CONNECTOR_READ_MODE: bool = False
+     VLLM_MORIIO_QP_PER_TRANSFER: int = 1
+     VLLM_MORIIO_POST_BATCH_SIZE: int = -1
+     VLLM_MORIIO_NUM_WORKERS: int = 1
+     VLLM_MOONCAKE_ABORT_REQUEST_TIMEOUT: int = 480
+     VLLM_USE_CUDNN_PREFILL: bool = False
+     VLLM_USE_TRTLLM_RAGGED_DEEPSEEK_PREFILL: bool = False
+     VLLM_ENABLE_CUDAGRAPH_GC: bool = False
+     VLLM_LOOPBACK_IP: str = ""
+     VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: bool = True
+     VLLM_ENABLE_RESPONSES_API_STORE: bool = False
+     VLLM_USE_TRTLLM_ATTENTION: str | None = None
+     VLLM_NVFP4_GEMM_BACKEND: str | None = None
+     VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION: bool = False
+     VLLM_HAS_FLASHINFER_CUBIN: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_BF16: bool = False
+     VLLM_ROCM_FP8_MFMA_PAGE_ATTN: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS: bool = False
+     VLLM_ALLREDUCE_USE_SYMM_MEM: bool = True
+     VLLM_TUNED_CONFIG_FOLDER: str | None = None
+     VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS: set[str] = set()
+     VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT: bool = False
+     VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS: bool = False
+     VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY: bool = False
+     VLLM_CUSTOM_SCOPES_FOR_PROFILING: bool = False
+     VLLM_NVTX_SCOPES_FOR_PROFILING: bool = False
+     VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES: bool = True
+     VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME: str = "VLLM_OBJECT_STORAGE_SHM_BUFFER"
+     VLLM_DEEPEP_BUFFER_SIZE_MB: int = 1024
+     VLLM_DEEPEP_HIGH_THROUGHPUT_FORCE_INTRA_NODE: bool = False
+     VLLM_DEEPEP_LOW_LATENCY_USE_MNNVL: bool = False
+     VLLM_DBO_COMM_SMS: int = 20
+     VLLM_PATTERN_MATCH_DEBUG: str | None = None
+     VLLM_DEBUG_DUMP_PATH: str | None = None
+     VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE: bool = True
+     VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING: bool = True
+     VLLM_USE_NCCL_SYMM_MEM: bool = False
+     VLLM_NCCL_INCLUDE_PATH: str | None = None
+     VLLM_USE_FBGEMM: bool = False
+     VLLM_GC_DEBUG: str = ""
+     VLLM_DEBUG_WORKSPACE: bool = False
+     VLLM_DISABLE_SHARED_EXPERTS_STREAM: bool = False
+     VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD: int = 256
+     VLLM_COMPILE_CACHE_SAVE_FORMAT: Literal["binary", "unpacked"] = "binary"
+     VLLM_USE_V2_MODEL_RUNNER: bool = False
+     VLLM_LOG_MODEL_INSPECTION: bool = False
+     VLLM_DEBUG_MFU_METRICS: bool = False
+
+
+ def get_default_cache_root():
+     return os.getenv(
+         "XDG_CACHE_HOME",
+         os.path.join(os.path.expanduser("~"), ".cache"),
+     )
+
+
+ def get_default_config_root():
+     return os.getenv(
+         "XDG_CONFIG_HOME",
+         os.path.join(os.path.expanduser("~"), ".config"),
+     )
+
+
+ def maybe_convert_int(value: str | None) -> int | None:
+     if value is None:
+         return None
+     return int(value)
+
+
+ def maybe_convert_bool(value: str | None) -> bool | None:
+     if value is None:
+         return None
+     return bool(int(value))
+
+
+ def disable_compile_cache() -> bool:
+     return bool(int(os.getenv("VLLM_DISABLE_COMPILE_CACHE", "0")))
+
+
+ def use_aot_compile() -> bool:
+     from vllm.model_executor.layers.batch_invariant import (
+         vllm_is_batch_invariant,
+     )
+     from vllm.platforms import current_platform
+     from vllm.utils.torch_utils import is_torch_equal_or_newer
+
+     default_value = (
+         "1"
+         if is_torch_equal_or_newer("2.10.0.dev")
+         and not disable_compile_cache()
+         # Disabling AOT_COMPILE for CPU
+         # See: https://github.com/vllm-project/vllm/issues/32033
+         and not current_platform.is_cpu()
+         else "0"
+     )
+
+     return (
+         not vllm_is_batch_invariant()
+         and os.environ.get("VLLM_USE_AOT_COMPILE", default_value) == "1"
+     )
+
+
+ def env_with_choices(
+     env_name: str,
+     default: str | None,
+     choices: list[str] | Callable[[], list[str]],
+     case_sensitive: bool = True,
+ ) -> Callable[[], str | None]:
+     """
+     Create a lambda that validates an environment variable against allowed choices
+
+     Args:
+         env_name: Name of the environment variable
+         default: Default value if not set (can be None)
+         choices: List of valid string options or callable that returns list
+         case_sensitive: Whether validation should be case sensitive
+
+     Returns:
+         Lambda function for environment_variables dict
+     """
+
+     def _get_validated_env() -> str | None:
+         value = os.getenv(env_name)
+         if value is None:
+             return default
+
+         # Resolve choices if it's a callable (for lazy loading)
+         actual_choices = choices() if callable(choices) else choices
+
+         if not case_sensitive:
+             check_value = value.lower()
+             check_choices = [choice.lower() for choice in actual_choices]
+         else:
+             check_value = value
+             check_choices = actual_choices
+
+         if check_value not in check_choices:
+             raise ValueError(
+                 f"Invalid value '{value}' for {env_name}. "
+                 f"Valid options: {actual_choices}."
+             )
+
+         return value
+
+     return _get_validated_env
+
+
+ def env_list_with_choices(
+     env_name: str,
+     default: list[str],
+     choices: list[str] | Callable[[], list[str]],
+     case_sensitive: bool = True,
+ ) -> Callable[[], list[str]]:
+     """
+     Create a lambda that validates an environment variable
+     containing comma-separated values against allowed choices
+
+     Args:
+         env_name: Name of the environment variable
+         default: Default list of values if not set
+         choices: List of valid string options or callable that returns list
+         case_sensitive: Whether validation should be case sensitive
+
+     Returns:
+         Lambda function for environment_variables
+         dict that returns list of strings
+     """
+
+     def _get_validated_env_list() -> list[str]:
+         value = os.getenv(env_name)
+         if value is None:
+             return default
+
+         # Split comma-separated values and strip whitespace
+         values = [v.strip() for v in value.split(",") if v.strip()]
+
+         if not values:
+             return default
+
+         # Resolve choices if it's a callable (for lazy loading)
+         actual_choices = choices() if callable(choices) else choices
+
+         # Validate each value
+         for val in values:
+             if not case_sensitive:
+                 check_value = val.lower()
+                 check_choices = [choice.lower() for choice in actual_choices]
+             else:
+                 check_value = val
+                 check_choices = actual_choices
+
+             if check_value not in check_choices:
+                 raise ValueError(
+                     f"Invalid value '{val}' in {env_name}. "
+                     f"Valid options: {actual_choices}."
+                 )
+
+         return values
+
+     return _get_validated_env_list
+
+
+ def env_set_with_choices(
+     env_name: str,
+     default: list[str],
+     choices: list[str] | Callable[[], list[str]],
+     case_sensitive: bool = True,
+ ) -> Callable[[], set[str]]:
+     """
+     Creates a lambda that validates an environment variable
+     containing comma-separated values against allowed choices and
+     returns the validated values as a set.
+     """
+
+     def _get_validated_env_set() -> set[str]:
+         return set(env_list_with_choices(env_name, default, choices, case_sensitive)())
+
+     return _get_validated_env_set
+
+
+ def get_vllm_port() -> int | None:
+     """Get the port from VLLM_PORT environment variable.
+
+     Returns:
+         The port number as an integer if VLLM_PORT is set, None otherwise.
+
+     Raises:
+         ValueError: If VLLM_PORT is a URI, which suggests a k8s service discovery issue.
+     """
+     if "VLLM_PORT" not in os.environ:
+         return None
+
+     port = os.getenv("VLLM_PORT", "0")
+
+     try:
+         return int(port)
+     except ValueError as err:
+         from urllib.parse import urlparse
+
+         parsed = urlparse(port)
+         if parsed.scheme:
+             raise ValueError(
+                 f"VLLM_PORT '{port}' appears to be a URI. "
+                 "This may be caused by a Kubernetes service discovery issue; "
+                 "check the warning in: https://docs.vllm.ai/en/stable/serving/env_vars.html"
+             ) from None
+         raise ValueError(f"VLLM_PORT '{port}' must be a valid integer") from err
+
+
+ # The start-* and end* here are used by the documentation generator
+ # to extract the used env vars.
+
+ # --8<-- [start:env-vars-definition]
+
+ logger = logging.getLogger(__name__)
+
+ environment_variables: dict[str, Callable[[], Any]] = {
+     # ================== Installation Time Env Vars ==================
+     # Target device of vLLM, supporting [cuda (by default),
+     # rocm, cpu]
+     "VLLM_TARGET_DEVICE": lambda: os.getenv("VLLM_TARGET_DEVICE", "cuda").lower(),
+     # Main CUDA version of vLLM. This follows PyTorch but can be overridden.
+     "VLLM_MAIN_CUDA_VERSION": lambda: os.getenv("VLLM_MAIN_CUDA_VERSION", "").lower()
+     or "12.9",
+     # Controls PyTorch float32 matmul precision mode within vLLM workers.
+     # Valid options mirror torch.set_float32_matmul_precision
+     "VLLM_FLOAT32_MATMUL_PRECISION": env_with_choices(
+         "VLLM_FLOAT32_MATMUL_PRECISION",
+         "highest",
+         ["highest", "high", "medium"],
+         case_sensitive=False,
+     ),
+     # Maximum number of compilation jobs to run in parallel.
+     # By default this is the number of CPUs
+     "MAX_JOBS": lambda: os.getenv("MAX_JOBS", None),
+     # Number of threads to use for nvcc
+     # By default this is 1.
+     # If set, `MAX_JOBS` will be reduced to avoid oversubscribing the CPU.
+     "NVCC_THREADS": lambda: os.getenv("NVCC_THREADS", None),
+     # If set, vllm will use precompiled binaries (*.so)
+     "VLLM_USE_PRECOMPILED": lambda: os.environ.get("VLLM_USE_PRECOMPILED", "")
+     .strip()
+     .lower()
+     in ("1", "true")
+     or bool(os.environ.get("VLLM_PRECOMPILED_WHEEL_LOCATION")),
+     # If set, skip adding +precompiled suffix to version string
+     "VLLM_SKIP_PRECOMPILED_VERSION_SUFFIX": lambda: bool(
+         int(os.environ.get("VLLM_SKIP_PRECOMPILED_VERSION_SUFFIX", "0"))
+     ),
+     # Used to mark that setup.py is running in a Docker build context,
+     # in order to force the use of precompiled binaries.
+     "VLLM_DOCKER_BUILD_CONTEXT": lambda: os.environ.get("VLLM_DOCKER_BUILD_CONTEXT", "")
+     .strip()
+     .lower()
+     in ("1", "true"),
+     # CMake build type
+     # If not set, defaults to "Debug" or "RelWithDebInfo"
+     # Available options: "Debug", "Release", "RelWithDebInfo"
+     "CMAKE_BUILD_TYPE": env_with_choices(
+         "CMAKE_BUILD_TYPE", None, ["Debug", "Release", "RelWithDebInfo"]
+     ),
+     # If set, vllm will print verbose logs during installation
+     "VERBOSE": lambda: bool(int(os.getenv("VERBOSE", "0"))),
+     # Root directory for vLLM configuration files
+     # Defaults to `~/.config/vllm` unless `XDG_CONFIG_HOME` is set
+     # Note that this not only affects how vllm finds its configuration files
+     # during runtime, but also affects how vllm installs its configuration
+     # files during **installation**.
+     "VLLM_CONFIG_ROOT": lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_CONFIG_ROOT",
+             os.path.join(get_default_config_root(), "vllm"),
+         )
+     ),
+     # ================== Runtime Env Vars ==================
+     # Root directory for vLLM cache files
+     # Defaults to `~/.cache/vllm` unless `XDG_CACHE_HOME` is set
+     "VLLM_CACHE_ROOT": lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_CACHE_ROOT",
+             os.path.join(get_default_cache_root(), "vllm"),
+         )
+     ),
+     # used in distributed environment to determine the ip address
+     # of the current node, when the node has multiple network interfaces.
+     # If you are using multi-node inference, you should set this differently
+     # on each node.
+     "VLLM_HOST_IP": lambda: os.getenv("VLLM_HOST_IP", ""),
+     # used in distributed environment to manually set the communication port
+     # Note: if VLLM_PORT is set, and some code asks for multiple ports, the
+     # VLLM_PORT will be used as the first port, and the rest will be generated
+     # by incrementing the VLLM_PORT value.
+     "VLLM_PORT": get_vllm_port,
+     # path used for ipc when the frontend api server is running in
+     # multi-processing mode to communicate with the backend engine process.
+     "VLLM_RPC_BASE_PATH": lambda: os.getenv(
+         "VLLM_RPC_BASE_PATH", tempfile.gettempdir()
+     ),
+     # If true, will load models from ModelScope instead of Hugging Face Hub.
+     # note that the value is true or false, not numbers
+     "VLLM_USE_MODELSCOPE": lambda: os.environ.get(
+         "VLLM_USE_MODELSCOPE", "False"
+     ).lower()
+     == "true",
+     # Interval in seconds to log a warning message when the ring buffer is full
+     "VLLM_RINGBUFFER_WARNING_INTERVAL": lambda: int(
+         os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")
+     ),
+     # path to cudatoolkit home directory, under which should be bin, include,
+     # and lib directories.
+     "CUDA_HOME": lambda: os.environ.get("CUDA_HOME", None),
+     # Path to the NCCL library file. It is needed because nccl>=2.19 brought
+     # by PyTorch contains a bug: https://github.com/NVIDIA/nccl/issues/1234
+     "VLLM_NCCL_SO_PATH": lambda: os.environ.get("VLLM_NCCL_SO_PATH", None),
+     # when `VLLM_NCCL_SO_PATH` is not set, vllm will try to find the nccl
+     # library file in the locations specified by `LD_LIBRARY_PATH`
+     "LD_LIBRARY_PATH": lambda: os.environ.get("LD_LIBRARY_PATH", None),
+     # flag to control the chunk size (in MB) for sleeping memory allocations under ROCm
+     "VLLM_ROCM_SLEEP_MEM_CHUNK_SIZE": lambda: int(
+         os.environ.get("VLLM_ROCM_SLEEP_MEM_CHUNK_SIZE", "256")
+     ),
+     # Use separate prefill and decode kernels for V1 attention instead of
+     # the unified triton kernel.
+     "VLLM_V1_USE_PREFILL_DECODE_ATTENTION": lambda: (
+         os.getenv("VLLM_V1_USE_PREFILL_DECODE_ATTENTION", "False").lower()
+         in ("true", "1")
+     ),
+     # Force vllm to use a specific flash-attention version (2 or 3), only valid
+     # when using the flash-attention backend.
+     "VLLM_FLASH_ATTN_VERSION": lambda: maybe_convert_int(
+         os.environ.get("VLLM_FLASH_ATTN_VERSION", None)
+     ),
+     # Feature flag to enable/disable Inductor standalone compile.
+     # In torch <= 2.7 we ignore this flag; in torch >= 2.9 this is
+     # enabled by default.
+     "VLLM_USE_STANDALONE_COMPILE": lambda: os.environ.get(
+         "VLLM_USE_STANDALONE_COMPILE", "1"
+     )
+     == "1",
+     # Debug pattern matching inside custom passes.
+     # Should be set to the fx.Node name (e.g. 'getitem_34' or 'scaled_mm_3').
+     "VLLM_PATTERN_MATCH_DEBUG": lambda: os.environ.get(
+         "VLLM_PATTERN_MATCH_DEBUG", None
+     ),
+     # Dump fx graphs to the given directory.
+     # It will override CompilationConfig.debug_dump_path if set.
+     "VLLM_DEBUG_DUMP_PATH": lambda: os.environ.get("VLLM_DEBUG_DUMP_PATH", None),
+     # Feature flag to enable/disable AOT compilation. This will ensure
+     # compilation is done in warmup phase and the compilation will be
+     # reused in subsequent calls.
+     "VLLM_USE_AOT_COMPILE": use_aot_compile,
+     # Feature flag to enable/disable bytecode in
+     # TorchCompileWithNoGuardsWrapper.
+     "VLLM_USE_BYTECODE_HOOK": lambda: bool(
+         int(os.environ.get("VLLM_USE_BYTECODE_HOOK", "1"))
+     ),
+     # Force vllm to always load AOT compiled models from disk. Failure
+     # to load will result in a hard error when this is enabled.
+     # Will be ignored when VLLM_USE_AOT_COMPILE is disabled.
+     "VLLM_FORCE_AOT_LOAD": lambda: os.environ.get("VLLM_FORCE_AOT_LOAD", "0") == "1",
+     # local rank of the process in the distributed setting, used to determine
+     # the GPU device id
+     "LOCAL_RANK": lambda: int(os.environ.get("LOCAL_RANK", "0")),
+     # used to control the visible devices in the distributed setting
+     "CUDA_VISIBLE_DEVICES": lambda: os.environ.get("CUDA_VISIBLE_DEVICES", None),
+     # timeout for each iteration in the engine
+     "VLLM_ENGINE_ITERATION_TIMEOUT_S": lambda: int(
+         os.environ.get("VLLM_ENGINE_ITERATION_TIMEOUT_S", "60")
+     ),
+     # Timeout in seconds for waiting for engine cores to become ready
+     # during startup. Default is 600 seconds (10 minutes).
+     "VLLM_ENGINE_READY_TIMEOUT_S": lambda: int(
+         os.environ.get("VLLM_ENGINE_READY_TIMEOUT_S", "600")
+     ),
+     # API key for vLLM API server
+     "VLLM_API_KEY": lambda: os.environ.get("VLLM_API_KEY", None),
+     # Whether to log responses from API Server for debugging
+     "VLLM_DEBUG_LOG_API_SERVER_RESPONSE": lambda: os.environ.get(
+         "VLLM_DEBUG_LOG_API_SERVER_RESPONSE", "False"
+     ).lower()
+     == "true",
+     # S3 access information, used for tensorizer to load model from S3
+     "S3_ACCESS_KEY_ID": lambda: os.environ.get("S3_ACCESS_KEY_ID", None),
+     "S3_SECRET_ACCESS_KEY": lambda: os.environ.get("S3_SECRET_ACCESS_KEY", None),
+     "S3_ENDPOINT_URL": lambda: os.environ.get("S3_ENDPOINT_URL", None),
+     # Usage stats collection
+     "VLLM_USAGE_STATS_SERVER": lambda: os.environ.get(
+         "VLLM_USAGE_STATS_SERVER", "https://stats.vllm.ai"
+     ),
+     "VLLM_NO_USAGE_STATS": lambda: os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1",
+     "VLLM_DISABLE_FLASHINFER_PREFILL": lambda: os.environ.get(
+         "VLLM_DISABLE_FLASHINFER_PREFILL", "0"
+     )
+     == "1",
+     "VLLM_DO_NOT_TRACK": lambda: (
+         os.environ.get("VLLM_DO_NOT_TRACK", None)
+         or os.environ.get("DO_NOT_TRACK", None)
+         or "0"
+     )
+     == "1",
+     "VLLM_USAGE_SOURCE": lambda: os.environ.get("VLLM_USAGE_SOURCE", "production"),
+     # Logging configuration
+     # If set to 0, vllm will not configure logging
+     # If set to 1, vllm will configure logging using the default configuration
+     # or the configuration file specified by VLLM_LOGGING_CONFIG_PATH
+     "VLLM_CONFIGURE_LOGGING": lambda: bool(
+         int(os.getenv("VLLM_CONFIGURE_LOGGING", "1"))
+     ),
+     "VLLM_LOGGING_CONFIG_PATH": lambda: os.getenv("VLLM_LOGGING_CONFIG_PATH"),
+     # this is used for configuring the default logging level
+     "VLLM_LOGGING_LEVEL": lambda: os.getenv("VLLM_LOGGING_LEVEL", "INFO").upper(),
+     # this is used for configuring the default logging stream
+     "VLLM_LOGGING_STREAM": lambda: os.getenv("VLLM_LOGGING_STREAM", "ext://sys.stdout"),
+     # if set, VLLM_LOGGING_PREFIX will be prepended to all log messages
+     "VLLM_LOGGING_PREFIX": lambda: os.getenv("VLLM_LOGGING_PREFIX", ""),
+     # Controls colored logging output. Options: "auto" (default, colors when terminal),
+     # "1" (always use colors), "0" (never use colors)
+     "VLLM_LOGGING_COLOR": lambda: os.getenv("VLLM_LOGGING_COLOR", "auto"),
+     # Standard unix flag for disabling ANSI color codes
+     "NO_COLOR": lambda: os.getenv("NO_COLOR", "0") != "0",
+     # If set, vllm will log stats at this interval in seconds
+     # If not set, vllm will log stats every 10 seconds.
+     "VLLM_LOG_STATS_INTERVAL": lambda: val
+     if (val := float(os.getenv("VLLM_LOG_STATS_INTERVAL", "10."))) > 0.0
+     else 10.0,
+     # Trace function calls
+     # If set to 1, vllm will trace function calls
+     # Useful for debugging
+     "VLLM_TRACE_FUNCTION": lambda: int(os.getenv("VLLM_TRACE_FUNCTION", "0")),
+     # Backend for attention computation
+     # Example options:
+     # - "TORCH_SDPA": use torch.nn.MultiheadAttention
+     # - "FLASH_ATTN": use FlashAttention
+     # - "FLASHINFER": use flashinfer
+     # - "FLASHMLA": use FlashMLA
+     # - "FLASH_ATTN_MLA": use FlashAttention for MLA
+     # - "FLASHINFER_MLA": use FlashInfer for MLA
+     # - "CUTLASS_MLA": use CUTLASS for MLA
+     # All possible options loaded dynamically from AttentionBackendEnum
+     "VLLM_ATTENTION_BACKEND": env_with_choices(
+         "VLLM_ATTENTION_BACKEND",
+         None,
+         lambda: list(
+             __import__(
+                 "vllm.v1.attention.backends.registry", fromlist=["AttentionBackendEnum"]
+             ).AttentionBackendEnum.__members__.keys()
+         ),
+     ),
+     # If set, vllm will use flashinfer sampler
+     "VLLM_USE_FLASHINFER_SAMPLER": lambda: bool(
+         int(os.environ["VLLM_USE_FLASHINFER_SAMPLER"])
+     )
+     if "VLLM_USE_FLASHINFER_SAMPLER" in os.environ
+     else None,
+     # Pipeline stage partition strategy
+     "VLLM_PP_LAYER_PARTITION": lambda: os.getenv("VLLM_PP_LAYER_PARTITION", None),
+     # (CPU backend only) CPU key-value cache space.
+     # default is None and will be set as 4 GB
+     "VLLM_CPU_KVCACHE_SPACE": lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0"))
+     if "VLLM_CPU_KVCACHE_SPACE" in os.environ
+     else None,
+     # (CPU backend only) CPU core ids bound by OpenMP threads, e.g., "0-31",
+     # "0,1,2", "0-31,33". CPU cores of different ranks are separated by '|'.
+     "VLLM_CPU_OMP_THREADS_BIND": lambda: os.getenv("VLLM_CPU_OMP_THREADS_BIND", "auto"),
+     # (CPU backend only) CPU cores not used by OMP threads.
+     # Those CPU cores will not be used by OMP threads of a rank.
714
+ "VLLM_CPU_NUM_OF_RESERVED_CPU": lambda: int(
715
+ os.getenv("VLLM_CPU_NUM_OF_RESERVED_CPU", "0")
716
+ )
717
+ if "VLLM_CPU_NUM_OF_RESERVED_CPU" in os.environ
718
+ else None,
719
+ # (CPU backend only) whether to use SGL kernels, optimized for small batch.
720
+ "VLLM_CPU_SGL_KERNEL": lambda: bool(int(os.getenv("VLLM_CPU_SGL_KERNEL", "0"))),
721
+ # If the env var is set, Ray Compiled Graph uses the specified
722
+ # channel type to communicate between workers belonging to
723
+ # different pipeline-parallel stages.
724
+ # Available options:
725
+ # - "auto": use the default channel type
726
+ # - "nccl": use NCCL for communication
727
+ # - "shm": use shared memory and gRPC for communication
728
+ "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE": env_with_choices(
729
+ "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE", "auto", ["auto", "nccl", "shm"]
730
+ ),
731
+ # If the env var is set, it enables GPU communication overlap
732
+ # (experimental feature) in Ray's Compiled Graph.
733
+ "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM": lambda: bool(
734
+ int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM", "0"))
735
+ ),
736
+ # If the env var is set, it uses a Ray Communicator wrapping
737
+ # vLLM's pipeline parallelism communicator to interact with Ray's
738
+ # Compiled Graph. Otherwise, it uses Ray's NCCL communicator.
739
+ "VLLM_USE_RAY_WRAPPED_PP_COMM": lambda: bool(
740
+ int(os.getenv("VLLM_USE_RAY_WRAPPED_PP_COMM", "1"))
741
+ ),
742
+ # Use dedicated multiprocess context for workers.
743
+ # Both spawn and fork work
744
+ "VLLM_WORKER_MULTIPROC_METHOD": env_with_choices(
745
+ "VLLM_WORKER_MULTIPROC_METHOD", "fork", ["spawn", "fork"]
746
+ ),
747
+ # Path to the cache for storing downloaded assets
748
+ "VLLM_ASSETS_CACHE": lambda: os.path.expanduser(
749
+ os.getenv(
750
+ "VLLM_ASSETS_CACHE",
751
+ os.path.join(get_default_cache_root(), "vllm", "assets"),
752
+ )
753
+ ),
+ # If the env var is set, we will clean the model files under
+ # the path $VLLM_ASSETS_CACHE/model_streamer/$model_name
+ "VLLM_ASSETS_CACHE_MODEL_CLEAN": lambda: bool(
+ int(os.getenv("VLLM_ASSETS_CACHE_MODEL_CLEAN", "0"))
+ ),
+ # Timeout for fetching images when serving multimodal models
+ # Default is 5 seconds
+ "VLLM_IMAGE_FETCH_TIMEOUT": lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")),
+ # Timeout for fetching videos when serving multimodal models
+ # Default is 30 seconds
+ "VLLM_VIDEO_FETCH_TIMEOUT": lambda: int(
+ os.getenv("VLLM_VIDEO_FETCH_TIMEOUT", "30")
+ ),
+ # Timeout for fetching audio when serving multimodal models
+ # Default is 10 seconds
+ "VLLM_AUDIO_FETCH_TIMEOUT": lambda: int(
+ os.getenv("VLLM_AUDIO_FETCH_TIMEOUT", "10")
+ ),
+ # Whether to allow HTTP redirects when fetching from media URLs.
+ # Defaults to True
+ "VLLM_MEDIA_URL_ALLOW_REDIRECTS": lambda: bool(
+ int(os.getenv("VLLM_MEDIA_URL_ALLOW_REDIRECTS", "1"))
+ ),
+ # Max number of workers for the thread pool handling
+ # media bytes loading. Set to 1 to disable parallel processing.
+ # Default is 8
+ "VLLM_MEDIA_LOADING_THREAD_COUNT": lambda: int(
+ os.getenv("VLLM_MEDIA_LOADING_THREAD_COUNT", "8")
+ ),
+ # Maximum filesize in MB for a single audio file when processing
+ # speech-to-text requests. Files larger than this will be rejected.
+ # Default is 25 MB
+ "VLLM_MAX_AUDIO_CLIP_FILESIZE_MB": lambda: int(
+ os.getenv("VLLM_MAX_AUDIO_CLIP_FILESIZE_MB", "25")
+ ),
+ # Backend for Video IO
+ # - "opencv": Default backend, based on OpenCV's buffered stream reader.
+ #
+ # Custom backend implementations can be registered
+ # via `@VIDEO_LOADER_REGISTRY.register("my_custom_video_loader")` and
+ # imported at runtime.
+ # If a non-existent backend is requested, an AssertionError will be raised.
+ "VLLM_VIDEO_LOADER_BACKEND": lambda: os.getenv(
+ "VLLM_VIDEO_LOADER_BACKEND", "opencv"
+ ),
+ # Media connector implementation.
+ # - "http": Default connector that supports fetching media via HTTP.
+ #
+ # Custom implementations can be registered
+ # via `@MEDIA_CONNECTOR_REGISTRY.register("my_custom_media_connector")` and
+ # imported at runtime.
+ # If a non-existent connector is requested, an AssertionError will be raised.
+ "VLLM_MEDIA_CONNECTOR": lambda: os.getenv("VLLM_MEDIA_CONNECTOR", "http"),
+ # Path to the XLA persistent cache directory.
+ # Only used for XLA devices such as TPUs.
+ "VLLM_XLA_CACHE_PATH": lambda: os.path.expanduser(
+ os.getenv(
+ "VLLM_XLA_CACHE_PATH",
+ os.path.join(get_default_cache_root(), "vllm", "xla_cache"),
+ )
+ ),
+ # If set, assert on XLA recompilation after each execution step.
+ "VLLM_XLA_CHECK_RECOMPILATION": lambda: bool(
+ int(os.getenv("VLLM_XLA_CHECK_RECOMPILATION", "0"))
+ ),
+ # Enable SPMD mode for TPU backend.
+ "VLLM_XLA_USE_SPMD": lambda: bool(int(os.getenv("VLLM_XLA_USE_SPMD", "0"))),
+ "VLLM_FUSED_MOE_CHUNK_SIZE": lambda: int(
+ os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", str(16 * 1024))
+ ),
+ # Control whether to use fused MoE activation chunking. Current chunking
+ # logic is incompatible with torch.compile and causes IMA. See issue
+ # https://github.com/vllm-project/vllm/issues/19631.
+ "VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING", "1"))
+ ),
+ # If set, the OpenAI API server will stay alive even after the underlying
+ # AsyncLLMEngine errors and stops serving requests
+ "VLLM_KEEP_ALIVE_ON_ENGINE_DEATH": lambda: bool(
+ int(os.getenv("VLLM_KEEP_ALIVE_ON_ENGINE_DEATH", "0"))
+ ),
+ # If the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN is set, it allows
+ # the user to specify a max sequence length greater than
+ # the max length derived from the model's config.json.
+ # To enable this, set VLLM_ALLOW_LONG_MAX_MODEL_LEN=1.
+ "VLLM_ALLOW_LONG_MAX_MODEL_LEN": lambda: (
+ os.environ.get("VLLM_ALLOW_LONG_MAX_MODEL_LEN", "0").strip().lower()
+ in ("1", "true")
+ ),
+ # If set, forces FP8 Marlin to be used for FP8 quantization regardless
+ # of the hardware support for FP8 compute.
+ "VLLM_TEST_FORCE_FP8_MARLIN": lambda: (
+ os.environ.get("VLLM_TEST_FORCE_FP8_MARLIN", "0").strip().lower()
+ in ("1", "true")
+ ),
+ "VLLM_TEST_FORCE_LOAD_FORMAT": lambda: os.getenv(
+ "VLLM_TEST_FORCE_LOAD_FORMAT", "dummy"
+ ),
+ # Time in ms for the zmq client to wait for a response from the backend
+ # server for simple data operations
+ "VLLM_RPC_TIMEOUT": lambda: int(os.getenv("VLLM_RPC_TIMEOUT", "10000")),
+ # Timeout in seconds for keeping HTTP connections alive in API server
+ "VLLM_HTTP_TIMEOUT_KEEP_ALIVE": lambda: int(
+ os.environ.get("VLLM_HTTP_TIMEOUT_KEEP_ALIVE", "5")
+ ),
+ # a list of plugin names to load, separated by commas.
+ # if this is not set, it means all plugins will be loaded
+ # if this is set to an empty string, no plugins will be loaded
+ "VLLM_PLUGINS": lambda: None
+ if "VLLM_PLUGINS" not in os.environ
+ else os.environ["VLLM_PLUGINS"].split(","),
+ # a local directory to look in for unrecognized LoRA adapters.
+ # only works if plugins are enabled and
+ # VLLM_ALLOW_RUNTIME_LORA_UPDATING is enabled.
+ "VLLM_LORA_RESOLVER_CACHE_DIR": lambda: os.getenv(
+ "VLLM_LORA_RESOLVER_CACHE_DIR", None
+ ),
+ # Enables torch CUDA profiling if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_CUDA_PROFILE": lambda: os.getenv("VLLM_TORCH_CUDA_PROFILE"),
+ # Enables torch profiler if set.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_DIR": lambda: os.getenv("VLLM_TORCH_PROFILER_DIR"),
+ # Enable torch profiler to record shapes if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_RECORD_SHAPES": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_RECORD_SHAPES")
+ ),
+ # Enable torch profiler to profile memory if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY")
+ ),
+ # Enable torch profiler to profile stack if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_WITH_STACK": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_WITH_STACK")
+ ),
+ # Enable torch profiler to profile flops if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_WITH_FLOPS": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_WITH_FLOPS")
+ ),
+ # Disable torch profiling of the AsyncLLMEngine process if set to 1.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_DISABLE_ASYNC_LLM": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_DISABLE_ASYNC_LLM")
+ ),
+ # Number of iterations to wait before starting profiling when using
+ # the torch/torch CUDA profiler. If set to 0, profiling starts immediately.
+ # Deprecated, see profiler_config.
+ "VLLM_PROFILER_DELAY_ITERS": lambda: (os.getenv("VLLM_PROFILER_DELAY_ITERS")),
+ # Maximum number of iterations to profile when using the torch/torch CUDA profiler.
+ # If set to 0, the number of iterations is not limited.
+ "VLLM_PROFILER_MAX_ITERS": lambda: os.getenv("VLLM_PROFILER_MAX_ITERS"),
+ # Control whether torch profiler gzip-compresses profiling files.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_USE_GZIP": lambda: os.getenv("VLLM_TORCH_PROFILER_USE_GZIP"),
+ # Control whether torch profiler dumps the self_cuda_time_total table.
+ # Set to 0 to disable dumping the table.
+ # Deprecated, see profiler_config.
+ "VLLM_TORCH_PROFILER_DUMP_CUDA_TIME_TOTAL": lambda: (
+ os.getenv("VLLM_TORCH_PROFILER_DUMP_CUDA_TIME_TOTAL")
+ ),
+ # If set, vLLM will use Triton implementations of AWQ.
+ "VLLM_USE_TRITON_AWQ": lambda: bool(int(os.getenv("VLLM_USE_TRITON_AWQ", "0"))),
+ # If set, allow loading or unloading LoRA adapters at runtime.
+ "VLLM_ALLOW_RUNTIME_LORA_UPDATING": lambda: (
+ os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "0").strip().lower()
+ in ("1", "true")
+ ),
+ # We assume drivers can report p2p status correctly.
+ # If the program hangs when using custom allreduce,
+ # potentially caused by a bug in the driver (535 series),
+ # it might be helpful to set VLLM_SKIP_P2P_CHECK=0
+ # so that vLLM can verify whether p2p is actually working.
+ # See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
+ "VLLM_SKIP_P2P_CHECK": lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "1") == "1",
+ # List of quantization kernels that should be disabled, used for testing
+ # and performance comparisons. Currently only affects MPLinearKernel
+ # selection
+ # (kernels: MacheteLinearKernel, MarlinLinearKernel, ExllamaLinearKernel)
+ "VLLM_DISABLED_KERNELS": lambda: []
+ if "VLLM_DISABLED_KERNELS" not in os.environ
+ else os.environ["VLLM_DISABLED_KERNELS"].split(","),
+ # Disable pynccl (using torch.distributed instead)
+ "VLLM_DISABLE_PYNCCL": lambda: (
+ os.getenv("VLLM_DISABLE_PYNCCL", "False").lower() in ("true", "1")
+ ),
+ # Disable aiter ops unless specifically enabled.
+ # Acts as a parent switch for enabling the other aiter operations.
+ "VLLM_ROCM_USE_AITER": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER", "False").lower() in ("true", "1")
+ ),
+ # Whether to use aiter paged attention.
+ # By default is disabled.
+ "VLLM_ROCM_USE_AITER_PAGED_ATTN": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_PAGED_ATTN", "False").lower() in ("true", "1")
+ ),
+ # Use the aiter linear op if aiter ops are enabled.
+ # Related ops:
+ # - scaled_mm (per-tensor / rowwise)
+ "VLLM_ROCM_USE_AITER_LINEAR": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_LINEAR", "True").lower() in ("true", "1")
+ ),
+ # Whether to use aiter moe ops.
+ # By default is enabled.
+ "VLLM_ROCM_USE_AITER_MOE": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_MOE", "True").lower() in ("true", "1")
+ ),
+ # Use the aiter rms norm op if aiter ops are enabled.
+ "VLLM_ROCM_USE_AITER_RMSNORM": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_RMSNORM", "True").lower() in ("true", "1")
+ ),
+ # Whether to use aiter mla ops.
+ # By default is enabled.
+ "VLLM_ROCM_USE_AITER_MLA": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_MLA", "True").lower() in ("true", "1")
+ ),
+ # Whether to use aiter mha ops.
+ # By default is enabled.
+ "VLLM_ROCM_USE_AITER_MHA": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_MHA", "True").lower() in ("true", "1")
+ ),
+ # Whether to use aiter fp4 gemm asm.
+ # By default is disabled.
+ "VLLM_ROCM_USE_AITER_FP4_ASM_GEMM": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_FP4_ASM_GEMM", "False").lower() in ("true", "1")
+ ),
+ # Whether to use aiter rope.
+ # By default is disabled.
+ "VLLM_ROCM_USE_AITER_TRITON_ROPE": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_TRITON_ROPE", "False").lower() in ("true", "1")
+ ),
+ # Whether to use the aiter triton fp8 bmm kernel.
+ # By default is enabled.
+ "VLLM_ROCM_USE_AITER_FP8BMM": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_FP8BMM", "True").lower() in ("true", "1")
+ ),
+ # Use AITER triton unified attention for V1 attention
+ "VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION", "False").lower()
+ in ("true", "1")
+ ),
+ # Whether to use aiter fusion shared experts ops.
+ # By default is disabled.
+ "VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS", "False").lower()
+ in ("true", "1")
+ ),
+ # Whether to use aiter triton kernels for gemm ops.
+ # By default is enabled.
+ "VLLM_ROCM_USE_AITER_TRITON_GEMM": lambda: (
+ os.getenv("VLLM_ROCM_USE_AITER_TRITON_GEMM", "True").lower() in ("true", "1")
+ ),
+ # Use ROCm skinny GEMMs.
+ "VLLM_ROCM_USE_SKINNY_GEMM": lambda: (
+ os.getenv("VLLM_ROCM_USE_SKINNY_GEMM", "True").lower() in ("true", "1")
+ ),
+ # Pad the fp8 weights to 256 bytes for ROCm
+ "VLLM_ROCM_FP8_PADDING": lambda: bool(int(os.getenv("VLLM_ROCM_FP8_PADDING", "1"))),
+ # Pad the weights for the moe kernel
+ "VLLM_ROCM_MOE_PADDING": lambda: bool(int(os.getenv("VLLM_ROCM_MOE_PADDING", "1"))),
+ # Custom paged attention kernel for MI3* cards
+ "VLLM_ROCM_CUSTOM_PAGED_ATTN": lambda: (
+ os.getenv("VLLM_ROCM_CUSTOM_PAGED_ATTN", "True").lower() in ("true", "1")
+ ),
+ # Custom quick allreduce kernel for MI3* cards.
+ # Choice of quantization level: FP, INT8, INT6, INT4 or NONE.
+ # Recommended for large models to speed up allreduce.
+ "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION": env_with_choices(
+ "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION",
+ "NONE",
+ ["FP", "INT8", "INT6", "INT4", "NONE"],
+ ),
+ # Custom quick allreduce kernel for MI3* cards.
+ # Due to the lack of a bfloat16 asm instruction, bfloat16
+ # kernels are slower than fp16.
+ # If this environment variable is set to 1, the input is converted to fp16.
+ "VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16": lambda: (
+ os.getenv("VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16", "True").lower()
+ in ("true", "1")
+ ),
+ # Custom quick allreduce kernel for MI3* cards.
+ # Controls the maximum allowed data size (in MB) for custom quick
+ # allreduce communication.
+ # Default: 2048 MB.
+ # Data exceeding this size will use either custom allreduce or RCCL
+ # communication.
+ "VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB": lambda: maybe_convert_int(
+ os.environ.get("VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB", None)
+ ),
+ # Divisor for dynamic query scale factor calculation for FP8 KV Cache
+ "Q_SCALE_CONSTANT": lambda: int(os.getenv("Q_SCALE_CONSTANT", "200")),
+ # Divisor for dynamic key scale factor calculation for FP8 KV Cache
+ "K_SCALE_CONSTANT": lambda: int(os.getenv("K_SCALE_CONSTANT", "200")),
+ # Divisor for dynamic value scale factor calculation for FP8 KV Cache
+ "V_SCALE_CONSTANT": lambda: int(os.getenv("V_SCALE_CONSTANT", "100")),
+ # If set, enable multiprocessing in LLM for the V1 code path.
+ "VLLM_ENABLE_V1_MULTIPROCESSING": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1"))
+ ),
+ "VLLM_LOG_BATCHSIZE_INTERVAL": lambda: float(
+ os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")
+ ),
+ "VLLM_DISABLE_COMPILE_CACHE": disable_compile_cache,
+ # If set, vllm will run in development mode, which will enable
+ # some additional endpoints for developing and debugging,
+ # e.g. `/reset_prefix_cache`
+ "VLLM_SERVER_DEV_MODE": lambda: bool(int(os.getenv("VLLM_SERVER_DEV_MODE", "0"))),
+ # Controls the maximum number of requests to handle in a
+ # single asyncio task when processing per-token outputs in the
+ # V1 AsyncLLM interface. It is applicable when handling a high
+ # concurrency of streaming requests.
+ # Setting this too high can result in a higher variance of
+ # inter-message latencies. Setting it too low can negatively impact
+ # TTFT and overall throughput.
+ "VLLM_V1_OUTPUT_PROC_CHUNK_SIZE": lambda: int(
+ os.getenv("VLLM_V1_OUTPUT_PROC_CHUNK_SIZE", "128")
+ ),
+ # If set, vLLM will disable the MLA attention optimizations.
+ "VLLM_MLA_DISABLE": lambda: bool(int(os.getenv("VLLM_MLA_DISABLE", "0"))),
+ # If set, vLLM will use the provided Flash Attention MLA
+ # maximum number of splits for CUDA graph decode.
+ "VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH": lambda: int(
+ os.getenv("VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH", "32")
+ ),
+ # Number of GPUs per worker in Ray. If set to a fraction,
+ # it allows Ray to schedule multiple actors on a single GPU,
+ # so that users can colocate other actors on the same GPUs as vLLM.
+ "VLLM_RAY_PER_WORKER_GPUS": lambda: float(
+ os.getenv("VLLM_RAY_PER_WORKER_GPUS", "1.0")
+ ),
+ # Bundle indices for Ray. If set, it controls precisely
+ # which indices are used for the Ray bundle, for every worker.
+ # Format: comma-separated list of integers, e.g. "0,1,2,3"
+ "VLLM_RAY_BUNDLE_INDICES": lambda: os.getenv("VLLM_RAY_BUNDLE_INDICES", ""),
+ # On some systems, find_loaded_library() may not work. So we allow users to
+ # specify the path through the environment variable VLLM_CUDART_SO_PATH.
+ "VLLM_CUDART_SO_PATH": lambda: os.getenv("VLLM_CUDART_SO_PATH", None),
+ # Rank of the process in the data parallel setting
+ "VLLM_DP_RANK": lambda: int(os.getenv("VLLM_DP_RANK", "0")),
+ # Local rank of the process in the data parallel setting.
+ # Defaults to VLLM_DP_RANK when not set.
+ "VLLM_DP_RANK_LOCAL": lambda: int(
+ os.getenv("VLLM_DP_RANK_LOCAL", sys.modules[__name__].VLLM_DP_RANK)
+ ),
+ # World size of the data parallel setting
+ "VLLM_DP_SIZE": lambda: int(os.getenv("VLLM_DP_SIZE", "1")),
+ # IP address of the master node in the data parallel setting
+ "VLLM_DP_MASTER_IP": lambda: os.getenv("VLLM_DP_MASTER_IP", "127.0.0.1"),
+ # Port of the master node in the data parallel setting
+ "VLLM_DP_MASTER_PORT": lambda: int(os.getenv("VLLM_DP_MASTER_PORT", "0")),
+ # In the context of executing MoE models with Data-Parallel, Expert-Parallel
+ # and Batched All-to-All dispatch/combine kernels, VLLM_MOE_DP_CHUNK_SIZE
+ # dictates the quantum of tokens that can be dispatched from a DP
+ # rank. All DP ranks process the activations in VLLM_MOE_DP_CHUNK_SIZE
+ # units.
+ "VLLM_MOE_DP_CHUNK_SIZE": lambda: int(os.getenv("VLLM_MOE_DP_CHUNK_SIZE", "256")),
+ "VLLM_ENABLE_MOE_DP_CHUNK": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_MOE_DP_CHUNK", "1"))
+ ),
+ # Randomize inputs during dummy runs when using Data Parallel
+ "VLLM_RANDOMIZE_DP_DUMMY_INPUTS": lambda: os.environ.get(
+ "VLLM_RANDOMIZE_DP_DUMMY_INPUTS", "0"
+ )
+ == "1",
+ # Strategy to pack the data parallel ranks for Ray.
+ # Available options:
+ # - "fill":
+ # for the DP master node, allocate exactly data-parallel-size-local DP ranks;
+ # for non-master nodes, allocate as many DP ranks as can fit;
+ # - "strict":
+ # allocate exactly data-parallel-size-local DP ranks to each picked node;
+ # - "span":
+ # should be used only when a single DP rank requires multiple nodes;
+ # allocate one DP rank over as many nodes as required for the set world_size.
+ # This environment variable is ignored if data-parallel-backend is not Ray.
+ "VLLM_RAY_DP_PACK_STRATEGY": lambda: os.getenv(
+ "VLLM_RAY_DP_PACK_STRATEGY", "strict"
+ ),
+ # Whether to use an S3 path for model loading in CI via RunAI Streamer
+ "VLLM_CI_USE_S3": lambda: os.environ.get("VLLM_CI_USE_S3", "0") == "1",
+ # Use model_redirect to redirect the model name to a local folder.
+ # `model_redirect` can be a json file mapping the model between
+ # repo_id and local folder:
+ # {"meta-llama/Llama-3.2-1B": "/tmp/Llama-3.2-1B"}
+ # or a space separated values table file:
+ # meta-llama/Llama-3.2-1B /tmp/Llama-3.2-1B
+ "VLLM_MODEL_REDIRECT_PATH": lambda: os.environ.get(
+ "VLLM_MODEL_REDIRECT_PATH", None
+ ),
+ # Whether to use atomicAdd reduce in the gptq/awq marlin kernel.
+ "VLLM_MARLIN_USE_ATOMIC_ADD": lambda: os.environ.get(
+ "VLLM_MARLIN_USE_ATOMIC_ADD", "0"
+ )
+ == "1",
+ # Whether to use the marlin kernel in the mxfp4 quantization method
+ "VLLM_MXFP4_USE_MARLIN": lambda: maybe_convert_bool(
+ os.environ.get("VLLM_MXFP4_USE_MARLIN", None)
+ ),
+ # The activation dtype for the marlin kernel
+ "VLLM_MARLIN_INPUT_DTYPE": env_with_choices(
+ "VLLM_MARLIN_INPUT_DTYPE", None, ["int8", "fp8"]
+ ),
+ # Whether to use DeepEPLL kernels for the NVFP4 quantization and dispatch
+ # method; only supported on Blackwell GPUs and with
+ # https://github.com/deepseek-ai/DeepEP/pull/341
+ "VLLM_DEEPEPLL_NVFP4_DISPATCH": lambda: bool(
+ int(os.getenv("VLLM_DEEPEPLL_NVFP4_DISPATCH", "0"))
+ ),
+ # Whether to turn on the outlines cache for V1.
+ # This cache is unbounded and on disk, so it's not safe to use in
+ # an environment with potentially malicious users.
+ "VLLM_V1_USE_OUTLINES_CACHE": lambda: os.environ.get(
+ "VLLM_V1_USE_OUTLINES_CACHE", "0"
+ )
+ == "1",
+ # Gap between padding buckets for the forward pass. E.g., with a gap of
+ # 8, the forward pass will run with paddings [16, 24, 32, ...].
+ "VLLM_TPU_BUCKET_PADDING_GAP": lambda: int(
+ os.environ["VLLM_TPU_BUCKET_PADDING_GAP"]
+ )
+ if "VLLM_TPU_BUCKET_PADDING_GAP" in os.environ
+ else 0,
+ "VLLM_TPU_MOST_MODEL_LEN": lambda: maybe_convert_int(
+ os.environ.get("VLLM_TPU_MOST_MODEL_LEN", None)
+ ),
+ # Whether Pathways is being used
+ "VLLM_TPU_USING_PATHWAYS": lambda: bool(
+ "proxy" in os.getenv("JAX_PLATFORMS", "").lower()
+ ),
+ # Allow use of DeepGemm kernels for fused moe ops.
+ "VLLM_USE_DEEP_GEMM": lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM", "1"))),
+ # Allow use of DeepGemm specifically for MoE fused ops (overrides only MoE).
+ "VLLM_MOE_USE_DEEP_GEMM": lambda: bool(
+ int(os.getenv("VLLM_MOE_USE_DEEP_GEMM", "1"))
+ ),
+ # Whether to use E8M0 scaling when DeepGEMM is used on Blackwell GPUs.
+ "VLLM_USE_DEEP_GEMM_E8M0": lambda: bool(
+ int(os.getenv("VLLM_USE_DEEP_GEMM_E8M0", "1"))
+ ),
+ # DeepGemm JITs the kernels on-demand. The warmup attempts to make DeepGemm
+ # JIT all the required kernels before model execution so there is no
+ # JIT'ing in the hot-path. However, this warmup increases the engine
+ # startup time by a couple of minutes.
+ # Available options:
+ # - "skip" : Skip warmup.
+ # - "full" : Warmup deepgemm by running all possible gemm shapes the
+ # engine could encounter.
+ # - "relax" : Select gemm shapes to run based on some heuristics. The
+ # heuristic aims to have the same effect as running all possible gemm
+ # shapes, but provides no guarantees.
+ "VLLM_DEEP_GEMM_WARMUP": env_with_choices(
+ "VLLM_DEEP_GEMM_WARMUP",
+ "relax",
+ [
+ "skip",
+ "full",
+ "relax",
+ ],
+ ),
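+ # For example (illustrative), VLLM_DEEP_GEMM_WARMUP=skip trades the
+ # couple-of-minutes warmup at startup for on-demand JIT'ing in the
+ # hot path.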
+ # Whether to use fused grouped_topk for MoE expert selection.
+ "VLLM_USE_FUSED_MOE_GROUPED_TOPK": lambda: bool(
+ int(os.getenv("VLLM_USE_FUSED_MOE_GROUPED_TOPK", "1"))
+ ),
+ # Allow use of FlashInfer FP8 block-scale GEMM for linear layers.
+ # This uses TensorRT-LLM kernels and requires SM90+ (Hopper).
+ "VLLM_BLOCKSCALE_FP8_GEMM_FLASHINFER": lambda: bool(
+ int(os.getenv("VLLM_BLOCKSCALE_FP8_GEMM_FLASHINFER", "0"))
+ ),
+ # Allow use of FlashInfer MoE kernels for fused moe ops.
+ "VLLM_USE_FLASHINFER_MOE_FP16": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP16", "0"))
+ ),
+ # Allow use of FlashInfer MoE kernels for fused moe ops.
+ "VLLM_USE_FLASHINFER_MOE_FP8": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP8", "0"))
+ ),
+ # Allow use of FlashInfer CUTLASS kernels for fused moe ops.
+ "VLLM_USE_FLASHINFER_MOE_FP4": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP4", "0"))
+ ),
+ # If set to 1, use the FlashInfer
+ # MXFP8 (activation) x MXFP4 (weight) MoE backend.
+ "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8", "0"))
+ ),
+ # If set to 1, use the FlashInfer CUTLASS backend for
+ # MXFP8 (activation) x MXFP4 (weight) MoE.
+ # This is separate from the TRTLLMGEN path controlled by
+ # VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8.
+ "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS", "0"))
+ ),
+ # If set to 1, use the FlashInfer
+ # BF16 (activation) x MXFP4 (weight) MoE backend.
+ "VLLM_USE_FLASHINFER_MOE_MXFP4_BF16": lambda: bool(
+ int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_BF16", "0"))
+ ),
+ # Control the cache size used by the xgrammar compiler. The default
+ # of 512 MB should be enough for roughly 1000 JSON schemas.
+ # It can be changed with this variable if needed for some reason.
+ "VLLM_XGRAMMAR_CACHE_MB": lambda: int(os.getenv("VLLM_XGRAMMAR_CACHE_MB", "512")),
+ # Control the threshold for msgspec to use 'zero copy' for
+ # serialization/deserialization of tensors. Tensors below
+ # this limit will be encoded into the msgpack buffer, and
+ # tensors above will instead be sent via a separate message.
+ # While the sending side still actually copies the tensor
+ # in all cases, on the receiving side, tensors above this
+ # limit will actually be zero-copy decoded.
+ "VLLM_MSGPACK_ZERO_COPY_THRESHOLD": lambda: int(
+ os.getenv("VLLM_MSGPACK_ZERO_COPY_THRESHOLD", "256")
+ ),
+ # If set, allow insecure serialization using pickle.
+ # This is useful for environments where it is deemed safe to use the
+ # insecure method and it is needed for some reason.
+ "VLLM_ALLOW_INSECURE_SERIALIZATION": lambda: bool(
+ int(os.getenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "0"))
+ ),
+ # IP address used for NIXL handshake between remote agents.
+ "VLLM_NIXL_SIDE_CHANNEL_HOST": lambda: os.getenv(
+ "VLLM_NIXL_SIDE_CHANNEL_HOST", "localhost"
+ ),
+ # Port used for NIXL handshake between remote agents.
+ "VLLM_NIXL_SIDE_CHANNEL_PORT": lambda: int(
+ os.getenv("VLLM_NIXL_SIDE_CHANNEL_PORT", "5600")
+ ),
+ # Port used for Mooncake handshake between remote agents.
+ "VLLM_MOONCAKE_BOOTSTRAP_PORT": lambda: int(
+ os.getenv("VLLM_MOONCAKE_BOOTSTRAP_PORT", "8998")
+ ),
+ # [DEPRECATED - will be removed in v0.15.0] all2all backend for vllm's
+ # expert parallel communication. Use the --all2all-backend CLI argument instead.
+ # Available options:
+ # - "naive": naive all2all implementation using broadcasts
+ # - "allgather_reducescatter": all2all implementation based on allgather and
+ # reducescatter
+ # - "pplx": use pplx kernels
+ # - "deepep_high_throughput": use deepep high-throughput kernels
+ # - "deepep_low_latency": use deepep low-latency kernels
+ # - "flashinfer_all2allv": use flashinfer alltoallv kernels for mnnvl
+ "VLLM_ALL2ALL_BACKEND": env_with_choices(
+ "VLLM_ALL2ALL_BACKEND",
+ None,
+ [
+ "naive",
+ "pplx",
+ "deepep_high_throughput",
+ "deepep_low_latency",
+ "allgather_reducescatter",
+ "flashinfer_all2allv",
+ ],
+ ),
+ # Flashinfer MoE backend for vLLM's fused Mixture-of-Experts support.
+ # Both require compute capability 10.0 or above.
+ # Available options:
+ # - "throughput":
+ # Uses CUTLASS kernels optimized for high-throughput batch inference.
+ # - "latency": [default]
+ # Uses TensorRT-LLM kernels optimized for low-latency inference.
+ "VLLM_FLASHINFER_MOE_BACKEND": env_with_choices(
+ "VLLM_FLASHINFER_MOE_BACKEND",
+ "latency",
+ ["throughput", "latency", "masked_gemm"],
+ ),
+ # Control the workspace buffer size for the FlashInfer backend.
+ "VLLM_FLASHINFER_WORKSPACE_BUFFER_SIZE": lambda: int(
+ os.getenv("VLLM_FLASHINFER_WORKSPACE_BUFFER_SIZE", str(394 * 1024 * 1024))
+ ),
+ # Control the maximum number of tokens per expert supported by the
+ # NVFP4 MoE CUTLASS kernel. This value is used to create a buffer for
+ # the blockscale tensor of activations for NVFP4 quantization.
+ # This is used to prevent the kernel from running out of memory.
+ "VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE": lambda: int(
+ os.getenv("VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE", "163840")
+ ),
+ # Specifies the thresholds of the communicated tensor sizes under which
+ # vllm should use flashinfer fused allreduce. The variable should be a
+ # JSON with the following format:
+ # { <world size>: <max size in mb> }
+ # Unspecified world sizes will fall back to
+ # { 2: 64, 4: 1, <everything else>: 0.5 }
+ "VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB": lambda: json.loads(
+ os.getenv("VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB", "{}")
+ ),
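+ # For example (illustrative),
+ # VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB='{"2": 64, "8": 1}'
+ # sets a 64 MB threshold for world size 2 and a 1 MB threshold for
+ # world size 8; note that JSON object keys must be quoted strings.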
+ # MoE routing strategy selector.
+ # See `RoutingSimulator.get_available_strategies()` for available
+ # strategies.
+ # Custom routing strategies can be registered by
+ # RoutingSimulator.register_strategy()
+ # Note: custom strategies may not produce correct model outputs
+ "VLLM_MOE_ROUTING_SIMULATION_STRATEGY": lambda: os.environ.get(
+ "VLLM_MOE_ROUTING_SIMULATION_STRATEGY", ""
+ ).lower(),
+ # Regex timeout for use by the vLLM tool parsing plugins.
+ "VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS": lambda: int(
+ os.getenv("VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS", "1")
+ ),
+ # Reduce CPU usage when vLLM is idle. Enabling this will incur a small
+ # latency penalty when a request eventually comes.
+ "VLLM_SLEEP_WHEN_IDLE": lambda: bool(int(os.getenv("VLLM_SLEEP_WHEN_IDLE", "0"))),
+ # Control the max chunk bytes (in MB) for the rpc message queue.
+ # Objects larger than this threshold will be broadcast to worker
+ # processes via zmq.
+ "VLLM_MQ_MAX_CHUNK_BYTES_MB": lambda: int(
+ os.getenv("VLLM_MQ_MAX_CHUNK_BYTES_MB", "16")
+ ),
+ # Timeout in seconds for execute_model RPC calls in multiprocessing
+ # executor (only applies when TP > 1).
+ "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": lambda: int(
+ os.getenv("VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS", "300")
+ ),
+ # KV Cache layout used throughout vllm.
+ # Some common values are:
+ # - NHD
+ # - HND
+ # Where N=num_blocks, H=num_heads and D=head_size. The default value will
+ # leave the layout choice to the backend. Mind that backends may only
+ # implement and support a subset of all possible layouts.
+ "VLLM_KV_CACHE_LAYOUT": env_with_choices(
+ "VLLM_KV_CACHE_LAYOUT", None, ["NHD", "HND"]
+ ),
+ # Enable checking whether the generated logits contain NaNs,
+ # indicating corrupted output. Useful for debugging low level bugs
+ # or bad hardware but it may add compute overhead.
+ "VLLM_COMPUTE_NANS_IN_LOGITS": lambda: bool(
+ int(os.getenv("VLLM_COMPUTE_NANS_IN_LOGITS", "0"))
+ ),
+ # Controls whether or not emulations are used for NVFP4
+ # generation on machines with compute capability < 10.0 for
+ # compressed-tensors models
+ "VLLM_USE_NVFP4_CT_EMULATIONS": lambda: bool(
+ int(os.getenv("VLLM_USE_NVFP4_CT_EMULATIONS", "0"))
+ ),
+ # Time (in seconds) after which the KV cache on the producer side is
+ # automatically cleared if no READ notification is received from the
+ # consumer. This is only applicable when using NixlConnector in a
+ # disaggregated decode-prefill setup.
+ "VLLM_NIXL_ABORT_REQUEST_TIMEOUT": lambda: int(
+ os.getenv("VLLM_NIXL_ABORT_REQUEST_TIMEOUT", "480")
+ ),
+ # Controls the read mode for the Mori-IO connector
+ "VLLM_MORIIO_CONNECTOR_READ_MODE": lambda: (
+ os.getenv("VLLM_MORIIO_CONNECTOR_READ_MODE", "False").lower() in ("true", "1")
+ ),
+ # Controls the QP (Queue Pair) per transfer configuration for the Mori-IO connector
+ "VLLM_MORIIO_QP_PER_TRANSFER": lambda: int(
+ os.getenv("VLLM_MORIIO_QP_PER_TRANSFER", "1")
+ ),
+ # Controls the post-processing batch size for the Mori-IO connector
+ "VLLM_MORIIO_POST_BATCH_SIZE": lambda: int(
+ os.getenv("VLLM_MORIIO_POST_BATCH_SIZE", "-1")
+ ),
+ # Controls the number of workers for Mori operations for the Mori-IO connector
+ "VLLM_MORIIO_NUM_WORKERS": lambda: int(os.getenv("VLLM_MORIIO_NUM_WORKERS", "1")),
+ # Timeout (in seconds) for MooncakeConnector in PD disaggregated setup.
+ "VLLM_MOONCAKE_ABORT_REQUEST_TIMEOUT": lambda: int(
+ os.getenv("VLLM_MOONCAKE_ABORT_REQUEST_TIMEOUT", "480")
+ ),
+ # Controls whether or not to use cudnn prefill
+ "VLLM_USE_CUDNN_PREFILL": lambda: bool(
+ int(os.getenv("VLLM_USE_CUDNN_PREFILL", "0"))
+ ),
+ # Controls whether to use TRT-LLM ragged DeepSeek prefill
+ "VLLM_USE_TRTLLM_RAGGED_DEEPSEEK_PREFILL": lambda: bool(
+ int(os.getenv("VLLM_USE_TRTLLM_RAGGED_DEEPSEEK_PREFILL", "0"))
+ ),
+ # If set to 1/True, use the TRTLLM attention backend in flashinfer.
+ # If set to 0/False, use the default attention backend in flashinfer.
+ # If not set, auto-detect the attention backend in flashinfer.
+ "VLLM_USE_TRTLLM_ATTENTION": lambda: (
+ None
+ if "VLLM_USE_TRTLLM_ATTENTION" not in os.environ
+ else os.environ["VLLM_USE_TRTLLM_ATTENTION"].lower() in ("1", "true")
+ ),
+ # If set to 1, when we use fp8 KV cache, we do not quantize Q to fp8
+ "VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION": lambda: bool(
+ int(os.getenv("VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION", "0"))
+ ),
+ # If set, it means we pre-downloaded cubin files and flashinfer will
+ # read the cubin files directly.
+ "VLLM_HAS_FLASHINFER_CUBIN": lambda: bool(
+ int(os.getenv("VLLM_HAS_FLASHINFER_CUBIN", "0"))
+ ),
+ # Supported options:
+ # - "flashinfer-cudnn": use flashinfer cudnn GEMM backend
+ # - "flashinfer-trtllm": use flashinfer trtllm GEMM backend
+ # - "flashinfer-cutlass": use flashinfer cutlass GEMM backend
+ # - <none>: automatically pick an available backend
+ "VLLM_NVFP4_GEMM_BACKEND": env_with_choices(
+ "VLLM_NVFP4_GEMM_BACKEND",
+ None,
+ ["flashinfer-cudnn", "flashinfer-trtllm", "flashinfer-cutlass", "cutlass"],
+ ),
+ # Controls garbage collection during CUDA graph capture.
+ # If set to 0 (default), enables GC freezing to speed up capture time.
+ # If set to 1, allows GC to run during capture.
+ "VLLM_ENABLE_CUDAGRAPH_GC": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_CUDAGRAPH_GC", "0"))
+ ),
+ # Used to force set up loopback IP
+ "VLLM_LOOPBACK_IP": lambda: os.getenv("VLLM_LOOPBACK_IP", ""),
+ # Used to set the process name prefix for vLLM processes.
+ # This is useful for debugging and monitoring purposes.
+ # The default value is "VLLM".
+ "VLLM_PROCESS_NAME_PREFIX": lambda: os.getenv("VLLM_PROCESS_NAME_PREFIX", "VLLM"),
+ # Allow chunked local attention with hybrid kv cache manager.
+ # Currently using the Hybrid KV cache manager with chunked local attention
+ # in the Llama4 models (the only models currently using chunked local attn)
+ # causes a latency regression. For this reason, we disable it by default.
+ # This flag is used to allow users to enable it if they want to (to save on
+ # kv-cache memory usage and enable longer contexts)
+ # TODO(lucas): Remove this flag once latency regression is resolved.
+ "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE": lambda: bool(
+ int(os.getenv("VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE", "1"))
+ ),
+ # Enables support for the "store" option in the OpenAI Responses API.
+ # When set to 1, vLLM's OpenAI server will retain the input and output
+ # messages for those requests in memory. By default, this is disabled (0),
+ # and the "store" option is ignored.
+ # NOTE/WARNING:
+ # 1. Messages are kept in memory only (not persisted to disk) and will be
+ # lost when the vLLM server shuts down.
+ # 2. Enabling this option will cause a memory leak, as stored messages are
+ # never removed from memory until the server terminates.
+ "VLLM_ENABLE_RESPONSES_API_STORE": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_RESPONSES_API_STORE", "0"))
+ ),
+ # If set, use the fp8 mfma in rocm paged attention.
+ "VLLM_ROCM_FP8_MFMA_PAGE_ATTN": lambda: bool(
+ int(os.getenv("VLLM_ROCM_FP8_MFMA_PAGE_ATTN", "0"))
+ ),
+ # Whether to use pytorch symmetric memory for allreduce
+ "VLLM_ALLREDUCE_USE_SYMM_MEM": lambda: bool(
+ int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "1"))
+ ),
+ # Experimental: use this to enable MCP tool calling for non-harmony models
+ "VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT": lambda: bool(
+ int(os.getenv("VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT", "0"))
+ ),
+ # Allows vLLM to find tuned configs under a customized folder
+ "VLLM_TUNED_CONFIG_FOLDER": lambda: os.getenv("VLLM_TUNED_CONFIG_FOLDER", None),
+ # Valid values are container,code_interpreter,web_search_preview,
+ # e.g. VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS=container,code_interpreter.
+ # If the server_label of your MCP tool is not in this list, it will
+ # be completely ignored.
+ "VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS": env_set_with_choices(
+ "VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS",
+ default=[],
+ choices=["container", "code_interpreter", "web_search_preview"],
+ ),
+ # Allows harmony instructions to be injected on system messages
+ "VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS": lambda: bool(
+ int(os.getenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "0"))
+ ),
+ # Enable automatic retry when tool call JSON parsing fails.
+ # If enabled, returns an error message to the model to retry.
+ # If disabled (default), raises an exception and fails the request.
+ "VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY": lambda: bool(
+ int(os.getenv("VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY", "0"))
+ ),
+ # Add optional custom scopes for profiling; disable to avoid overheads
+ "VLLM_CUSTOM_SCOPES_FOR_PROFILING": lambda: bool(
+ int(os.getenv("VLLM_CUSTOM_SCOPES_FOR_PROFILING", "0"))
+ ),
+ # Add optional nvtx scopes for profiling; disable to avoid overheads
+ "VLLM_NVTX_SCOPES_FOR_PROFILING": lambda: bool(
+ int(os.getenv("VLLM_NVTX_SCOPES_FOR_PROFILING", "0"))
+ ),
+ # Represent block hashes in KV cache events as 64-bit integers instead of
+ # raw bytes. Defaults to True for backward compatibility.
+ "VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES": lambda: bool(
+ int(os.getenv("VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES", "1"))
+ ),
+ # Name of the shared memory buffer used for object storage.
+ # Only effective when mm_config.mm_processor_cache_type == "shm".
+ "VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME": lambda: os.getenv(
+ "VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME", "VLLM_OBJECT_STORAGE_SHM_BUFFER"
+ ),
+ # The size in MB of the buffers (NVL and RDMA) used by DeepEP
+ "VLLM_DEEPEP_BUFFER_SIZE_MB": lambda: int(
+ os.getenv("VLLM_DEEPEP_BUFFER_SIZE_MB", "1024")
+ ),
+ # Force DeepEP to use the intranode kernel for inter-node communication in
+ # high throughput mode. This is useful to achieve higher prefill throughput
+ # on systems that support multi-node NVLink (e.g., GB200).
+ "VLLM_DEEPEP_HIGH_THROUGHPUT_FORCE_INTRA_NODE": lambda: bool(
+ int(os.getenv("VLLM_DEEPEP_HIGH_THROUGHPUT_FORCE_INTRA_NODE", "0"))
+ ),
+ # Allow DeepEP to use MNNVL (multi-node NVLink) for the internode_ll kernel;
+ # turn this on for better latency on GB200-like systems.
+ "VLLM_DEEPEP_LOW_LATENCY_USE_MNNVL": lambda: bool(
+ int(os.getenv("VLLM_DEEPEP_LOW_LATENCY_USE_MNNVL", "0"))
+ ),
+ # The number of SMs to allocate for communication kernels when running DBO;
+ # the rest of the SMs on the device will be allocated to compute.
+ "VLLM_DBO_COMM_SMS": lambda: int(os.getenv("VLLM_DBO_COMM_SMS", "20")),
+ # Enable max_autotune & coordinate_descent_tuning in inductor_config
+ # to compile static shapes passed from compile_sizes in compilation_config.
+ # If set to 1, enable max_autotune; by default, this is enabled (1).
+ "VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE", "1"))
+ ),
+ # If set to 1, enable coordinate_descent_tuning;
+ # by default, this is enabled (1).
+ "VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING": lambda: bool(
+ int(os.getenv("VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING", "1"))
+ ),
+ # Flag to enable NCCL symmetric memory allocation and registration
+ "VLLM_USE_NCCL_SYMM_MEM": lambda: bool(
+ int(os.getenv("VLLM_USE_NCCL_SYMM_MEM", "0"))
+ ),
+ # NCCL header path
+ "VLLM_NCCL_INCLUDE_PATH": lambda: os.environ.get("VLLM_NCCL_INCLUDE_PATH", None),
+ # Flag to enable FBGemm kernels on model execution
+ "VLLM_USE_FBGEMM": lambda: bool(int(os.getenv("VLLM_USE_FBGEMM", "0"))),
+ # GC debug config
+ # - VLLM_GC_DEBUG=0: disable GC debugger
+ # - VLLM_GC_DEBUG=1: enable GC debugger with gc.collect elapsed times
+ # - VLLM_GC_DEBUG='{"top_objects":5}': enable GC debugger with
+ # top 5 collected objects
+ "VLLM_GC_DEBUG": lambda: os.getenv("VLLM_GC_DEBUG", ""),
+ # Debug workspace allocations. Enables
+ # logging of workspace resize operations.
+ "VLLM_DEBUG_WORKSPACE": lambda: bool(int(os.getenv("VLLM_DEBUG_WORKSPACE", "0"))),
+ # Disables parallel execution of shared_experts via a separate cuda stream
+ "VLLM_DISABLE_SHARED_EXPERTS_STREAM": lambda: bool(
+ int(os.getenv("VLLM_DISABLE_SHARED_EXPERTS_STREAM", "0"))
+ ),
+ # Limits when we run shared_experts in a separate stream.
+ # We found that for large batch sizes, the separate stream
+ # execution is not beneficial (most likely because of the input clone).
+ # TODO(alexm-redhat): Tune to be more dynamic based on GPU type
+ "VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD": lambda: int(
+ os.getenv("VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD", "256")
+ ),
+ # Format for saving torch.compile cache artifacts
+ # - "binary": saves as binary file
+ # Safe for multiple vllm serve processes accessing the same torch compile cache.
+ # - "unpacked": saves as directory structure (for inspection/debugging)
+ # NOT multiprocess safe - race conditions may occur with multiple processes.
+ # Allows viewing and setting breakpoints in Inductor's code output files.
+ "VLLM_COMPILE_CACHE_SAVE_FORMAT": env_with_choices(
+ "VLLM_COMPILE_CACHE_SAVE_FORMAT", "binary", ["binary", "unpacked"]
+ ),
+ # Flag to enable the v2 model runner.
+ "VLLM_USE_V2_MODEL_RUNNER": lambda: bool(
+ int(os.getenv("VLLM_USE_V2_MODEL_RUNNER", "0"))
+ ),
+ # Log model inspection after loading.
+ # If enabled, logs a transformers-style hierarchical view of the model
+ # with quantization methods and attention backends.
+ "VLLM_LOG_MODEL_INSPECTION": lambda: bool(
+ int(os.getenv("VLLM_LOG_MODEL_INSPECTION", "0"))
+ ),
+ # Debug logging for --enable-mfu-metrics
+ "VLLM_DEBUG_MFU_METRICS": lambda: bool(
+ int(os.getenv("VLLM_DEBUG_MFU_METRICS", "0"))
+ ),
+ }
+
+ # --8<-- [end:env-vars-definition]
+
+
+ def __getattr__(name: str):
+ """
+ Gets environment variables lazily.
+
+ NOTE: After enable_envs_cache() is invoked (which is triggered after
+ service initialization), all environment variables will be cached.
+ """
+ if name in environment_variables:
+ return environment_variables[name]()
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
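+
+ # Illustrative usage sketch (not part of the original source; assumes this
+ # module is importable as `vllm.envs`). Each attribute access goes through
+ # __getattr__ above, so values track os.environ until enable_envs_cache()
+ # freezes them:
+ #
+ #     import os
+ #     import vllm.envs as envs
+ #
+ #     os.environ["VLLM_DP_RANK"] = "3"
+ #     assert envs.VLLM_DP_RANK == 3  # lambda re-evaluated on each access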
+
+
+ def _is_envs_cache_enabled() -> bool:
+ """Check whether __getattr__ is wrapped with functools.cache."""
+ global __getattr__
+ return hasattr(__getattr__, "cache_clear")
+
+
+ def enable_envs_cache() -> None:
+ """
+ Enables caching of environment variables. This is useful for performance
+ reasons, as it avoids the need to re-evaluate environment variables on
+ every call.
+
+ NOTE: Currently, it's invoked after service initialization to reduce
+ runtime overhead. This also means that environment variables should NOT
+ be updated after the service is initialized.
+ """
+ if _is_envs_cache_enabled():
+ # Avoid wrapping with functools.cache multiple times
+ return
+ # Wrap __getattr__ with functools.cache
+ global __getattr__
+ __getattr__ = functools.cache(__getattr__)
+
+ # Cache all environment variables
+ for key in environment_variables:
+ __getattr__(key)
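+
+ # Sketch (illustrative, not from the original source): enabling the cache
+ # after startup and resetting it between tests.
+ #
+ #     enable_envs_cache()    # wraps __getattr__ with functools.cache
+ #     ...                    # serve traffic; env lookups are now cached
+ #     disable_envs_cache()   # unwrap again, e.g. for unit-test isolation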
+
+
+ def disable_envs_cache() -> None:
+ """
+ Resets the environment variables cache. It can be used to isolate environments
+ between unit tests.
+ """
+ global __getattr__
+ # If __getattr__ is wrapped by functools.cache, unwrap the caching layer.
+ if _is_envs_cache_enabled():
+ __getattr__ = __getattr__.__wrapped__
+
+
+ def __dir__():
+ return list(environment_variables.keys())
+
+
+ def is_set(name: str):
+ """Check if an environment variable is explicitly set."""
+ if name in environment_variables:
+ return name in os.environ
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
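+
+ # For example (illustrative): with VLLM_USE_DEEP_GEMM unset in the
+ # environment, is_set("VLLM_USE_DEEP_GEMM") is False even though the
+ # attribute lookup still returns the default value True.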
+
+
+ def compile_factors() -> dict[str, object]:
+ """Return env vars used for torch.compile cache keys.
+
+ Start with every known vLLM env var; drop entries in `ignored_factors`;
+ hash everything else. This keeps the cache key aligned across workers."""
+
+ ignored_factors: set[str] = {
+ "MAX_JOBS",
+ "VLLM_RPC_BASE_PATH",
+ "VLLM_USE_MODELSCOPE",
+ "VLLM_RINGBUFFER_WARNING_INTERVAL",
+ "VLLM_DEBUG_DUMP_PATH",
+ "VLLM_PORT",
+ "VLLM_CACHE_ROOT",
+ "LD_LIBRARY_PATH",
+ "VLLM_SERVER_DEV_MODE",
+ "VLLM_DP_MASTER_IP",
+ "VLLM_DP_MASTER_PORT",
+ "VLLM_RANDOMIZE_DP_DUMMY_INPUTS",
+ "VLLM_CI_USE_S3",
+ "VLLM_MODEL_REDIRECT_PATH",
+ "VLLM_HOST_IP",
+ "VLLM_FORCE_AOT_LOAD",
+ "S3_ACCESS_KEY_ID",
+ "S3_SECRET_ACCESS_KEY",
+ "S3_ENDPOINT_URL",
+ "VLLM_USAGE_STATS_SERVER",
+ "VLLM_NO_USAGE_STATS",
+ "VLLM_DO_NOT_TRACK",
+ "VLLM_LOGGING_LEVEL",
+ "VLLM_LOGGING_PREFIX",
+ "VLLM_LOGGING_STREAM",
+ "VLLM_LOGGING_CONFIG_PATH",
+ "VLLM_LOGGING_COLOR",
+ "VLLM_LOG_STATS_INTERVAL",
+ "VLLM_DEBUG_LOG_API_SERVER_RESPONSE",
+ "VLLM_TUNED_CONFIG_FOLDER",
+ "VLLM_ENGINE_ITERATION_TIMEOUT_S",
+ "VLLM_HTTP_TIMEOUT_KEEP_ALIVE",
+ "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS",
+ "VLLM_KEEP_ALIVE_ON_ENGINE_DEATH",
+ "VLLM_SLEEP_WHEN_IDLE",
+ "VLLM_IMAGE_FETCH_TIMEOUT",
+ "VLLM_VIDEO_FETCH_TIMEOUT",
+ "VLLM_AUDIO_FETCH_TIMEOUT",
+ "VLLM_MEDIA_URL_ALLOW_REDIRECTS",
+ "VLLM_MEDIA_LOADING_THREAD_COUNT",
+ "VLLM_MAX_AUDIO_CLIP_FILESIZE_MB",
+ "VLLM_VIDEO_LOADER_BACKEND",
+ "VLLM_MEDIA_CONNECTOR",
+ "VLLM_ASSETS_CACHE",
+ "VLLM_ASSETS_CACHE_MODEL_CLEAN",
+ "VLLM_WORKER_MULTIPROC_METHOD",
+ "VLLM_ENABLE_V1_MULTIPROCESSING",
+ "VLLM_V1_OUTPUT_PROC_CHUNK_SIZE",
+ "VLLM_CPU_KVCACHE_SPACE",
+ "VLLM_CPU_OMP_THREADS_BIND",
+ "VLLM_CPU_NUM_OF_RESERVED_CPU",
+ "VLLM_CPU_MOE_PREPACK",
+ "VLLM_CPU_SGL_KERNEL",
+ "VLLM_TEST_FORCE_LOAD_FORMAT",
+ "LOCAL_RANK",
+ "CUDA_VISIBLE_DEVICES",
+ "NO_COLOR",
+ }
+
+ from vllm.config.utils import normalize_value
+
+ factors: dict[str, object] = {}
+ for factor, getter in environment_variables.items():
+ if factor in ignored_factors:
+ continue
+
+ try:
+ raw = getter()
+ except Exception as exc:  # pragma: no cover - defensive logging
+ logger.warning(
+ "Skipping environment variable %s while hashing compile factors: %s",
+ factor,
+ exc,
+ )
+ continue
+
+ factors[factor] = normalize_value(raw)
+
+ ray_noset_env_vars = [
+ # Refer to
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/nvidia_gpu.py#L11
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/amd_gpu.py#L11
+ # https://github.com/ray-project/ray/blob/b97d21dab233c2bd8ed7db749a82a1e594222b5c/python/ray/_private/accelerators/amd_gpu.py#L10
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/npu.py#L12
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/hpu.py#L12
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/neuron.py#L14
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/tpu.py#L38
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/intel_gpu.py#L10
+ # https://github.com/ray-project/ray/blob/c584b1ea97b00793d1def71eaf81537d70efba42/python/ray/_private/accelerators/rbln.py#L10
+ "RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES",
+ "RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES",
+ "RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES",
+ "RAY_EXPERIMENTAL_NOSET_ASCEND_RT_VISIBLE_DEVICES",
+ "RAY_EXPERIMENTAL_NOSET_HABANA_VISIBLE_MODULES",
+ "RAY_EXPERIMENTAL_NOSET_NEURON_RT_VISIBLE_CORES",
+ "RAY_EXPERIMENTAL_NOSET_TPU_VISIBLE_CHIPS",
+ "RAY_EXPERIMENTAL_NOSET_ONEAPI_DEVICE_SELECTOR",
+ "RAY_EXPERIMENTAL_NOSET_RBLN_RT_VISIBLE_DEVICES",
+ ]
+
+ for var in ray_noset_env_vars:
+ factors[var] = normalize_value(os.getenv(var))
+
+ return factors
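+
+ # Illustrative follow-up (not part of the original source): the returned
+ # dict is suitable for hashing into a compile-cache key, assuming the
+ # normalized values are JSON-serializable:
+ #
+ #     import hashlib
+ #     import json
+ #
+ #     key = hashlib.sha256(
+ #         json.dumps(compile_factors(), sort_keys=True).encode()
+ #     ).hexdigest()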