vllm_cpu_amxbf16-0.11.2.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the contents of publicly available package versions released to one of the supported registries. The information is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1536)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +983 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +2863 -0
  6. vllm/_ipex_ops.py +457 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +59 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +18 -0
  14. vllm/attention/backends/__init__.py +0 -0
  15. vllm/attention/backends/abstract.py +391 -0
  16. vllm/attention/backends/registry.py +195 -0
  17. vllm/attention/backends/utils.py +33 -0
  18. vllm/attention/layer.py +1052 -0
  19. vllm/attention/layers/__init__.py +0 -0
  20. vllm/attention/layers/chunked_local_attention.py +121 -0
  21. vllm/attention/layers/cross_attention.py +178 -0
  22. vllm/attention/layers/encoder_only_attention.py +103 -0
  23. vllm/attention/ops/__init__.py +0 -0
  24. vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
  25. vllm/attention/ops/common.py +414 -0
  26. vllm/attention/ops/flashmla.py +251 -0
  27. vllm/attention/ops/merge_attn_states.py +47 -0
  28. vllm/attention/ops/paged_attn.py +262 -0
  29. vllm/attention/ops/pallas_kv_cache_update.py +130 -0
  30. vllm/attention/ops/prefix_prefill.py +814 -0
  31. vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
  32. vllm/attention/ops/triton_decode_attention.py +712 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +105 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
  35. vllm/attention/ops/triton_unified_attention.py +941 -0
  36. vllm/attention/ops/vit_attn_wrappers.py +178 -0
  37. vllm/attention/selector.py +231 -0
  38. vllm/attention/utils/__init__.py +0 -0
  39. vllm/attention/utils/fa_utils.py +109 -0
  40. vllm/attention/utils/kv_sharing_utils.py +33 -0
  41. vllm/attention/utils/kv_transfer_utils.py +60 -0
  42. vllm/beam_search.py +88 -0
  43. vllm/benchmarks/__init__.py +0 -0
  44. vllm/benchmarks/datasets.py +3222 -0
  45. vllm/benchmarks/latency.py +172 -0
  46. vllm/benchmarks/lib/__init__.py +3 -0
  47. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  48. vllm/benchmarks/lib/ready_checker.py +72 -0
  49. vllm/benchmarks/lib/utils.py +79 -0
  50. vllm/benchmarks/serve.py +1531 -0
  51. vllm/benchmarks/sweep/__init__.py +0 -0
  52. vllm/benchmarks/sweep/cli.py +38 -0
  53. vllm/benchmarks/sweep/param_sweep.py +91 -0
  54. vllm/benchmarks/sweep/plot.py +580 -0
  55. vllm/benchmarks/sweep/serve.py +416 -0
  56. vllm/benchmarks/sweep/serve_sla.py +492 -0
  57. vllm/benchmarks/sweep/server.py +114 -0
  58. vllm/benchmarks/sweep/sla_sweep.py +132 -0
  59. vllm/benchmarks/sweep/utils.py +4 -0
  60. vllm/benchmarks/throughput.py +799 -0
  61. vllm/collect_env.py +857 -0
  62. vllm/compilation/__init__.py +0 -0
  63. vllm/compilation/activation_quant_fusion.py +209 -0
  64. vllm/compilation/backends.py +759 -0
  65. vllm/compilation/base_static_graph.py +57 -0
  66. vllm/compilation/caching.py +178 -0
  67. vllm/compilation/collective_fusion.py +1234 -0
  68. vllm/compilation/compiler_interface.py +639 -0
  69. vllm/compilation/counter.py +48 -0
  70. vllm/compilation/cuda_graph.py +208 -0
  71. vllm/compilation/decorators.py +571 -0
  72. vllm/compilation/fix_functionalization.py +253 -0
  73. vllm/compilation/fusion.py +374 -0
  74. vllm/compilation/fusion_attn.py +359 -0
  75. vllm/compilation/fx_utils.py +91 -0
  76. vllm/compilation/inductor_pass.py +133 -0
  77. vllm/compilation/matcher_utils.py +317 -0
  78. vllm/compilation/monitor.py +62 -0
  79. vllm/compilation/noop_elimination.py +134 -0
  80. vllm/compilation/partition_rules.py +72 -0
  81. vllm/compilation/pass_manager.py +135 -0
  82. vllm/compilation/piecewise_backend.py +121 -0
  83. vllm/compilation/post_cleanup.py +21 -0
  84. vllm/compilation/qk_norm_rope_fusion.py +238 -0
  85. vllm/compilation/sequence_parallelism.py +363 -0
  86. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  87. vllm/compilation/vllm_inductor_pass.py +173 -0
  88. vllm/compilation/wrapper.py +238 -0
  89. vllm/config/__init__.py +102 -0
  90. vllm/config/cache.py +207 -0
  91. vllm/config/compilation.py +975 -0
  92. vllm/config/device.py +75 -0
  93. vllm/config/ec_transfer.py +110 -0
  94. vllm/config/kv_events.py +56 -0
  95. vllm/config/kv_transfer.py +114 -0
  96. vllm/config/load.py +124 -0
  97. vllm/config/lora.py +112 -0
  98. vllm/config/model.py +2162 -0
  99. vllm/config/multimodal.py +248 -0
  100. vllm/config/observability.py +123 -0
  101. vllm/config/parallel.py +655 -0
  102. vllm/config/pooler.py +122 -0
  103. vllm/config/scheduler.py +298 -0
  104. vllm/config/speculative.py +654 -0
  105. vllm/config/speech_to_text.py +38 -0
  106. vllm/config/structured_outputs.py +92 -0
  107. vllm/config/utils.py +178 -0
  108. vllm/config/vllm.py +1166 -0
  109. vllm/connections.py +189 -0
  110. vllm/device_allocator/__init__.py +0 -0
  111. vllm/device_allocator/cumem.py +327 -0
  112. vllm/distributed/__init__.py +6 -0
  113. vllm/distributed/communication_op.py +43 -0
  114. vllm/distributed/device_communicators/__init__.py +0 -0
  115. vllm/distributed/device_communicators/all2all.py +490 -0
  116. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  117. vllm/distributed/device_communicators/base_device_communicator.py +297 -0
  118. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  119. vllm/distributed/device_communicators/cuda_communicator.py +340 -0
  120. vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
  121. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  122. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  123. vllm/distributed/device_communicators/pynccl.py +386 -0
  124. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  125. vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
  126. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  127. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  128. vllm/distributed/device_communicators/shm_broadcast.py +733 -0
  129. vllm/distributed/device_communicators/shm_object_storage.py +660 -0
  130. vllm/distributed/device_communicators/symm_mem.py +156 -0
  131. vllm/distributed/device_communicators/tpu_communicator.py +107 -0
  132. vllm/distributed/device_communicators/xpu_communicator.py +95 -0
  133. vllm/distributed/ec_transfer/__init__.py +14 -0
  134. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  135. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  136. vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
  137. vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
  138. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  139. vllm/distributed/eplb/__init__.py +8 -0
  140. vllm/distributed/eplb/eplb_state.py +837 -0
  141. vllm/distributed/eplb/rebalance_algo.py +260 -0
  142. vllm/distributed/eplb/rebalance_execute.py +431 -0
  143. vllm/distributed/kv_events.py +371 -0
  144. vllm/distributed/kv_transfer/README.md +29 -0
  145. vllm/distributed/kv_transfer/__init__.py +20 -0
  146. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  147. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  149. vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
  150. vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
  159. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
  160. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
  161. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
  162. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
  163. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
  164. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  165. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  166. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  167. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  168. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
  169. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  170. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
  171. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
  172. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
  173. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  174. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  175. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
  176. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
  177. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  178. vllm/distributed/parallel_state.py +1759 -0
  179. vllm/distributed/tpu_distributed_utils.py +188 -0
  180. vllm/distributed/utils.py +543 -0
  181. vllm/engine/__init__.py +0 -0
  182. vllm/engine/arg_utils.py +2144 -0
  183. vllm/engine/async_llm_engine.py +6 -0
  184. vllm/engine/llm_engine.py +6 -0
  185. vllm/engine/protocol.py +170 -0
  186. vllm/entrypoints/__init__.py +0 -0
  187. vllm/entrypoints/anthropic/__init__.py +0 -0
  188. vllm/entrypoints/anthropic/protocol.py +162 -0
  189. vllm/entrypoints/anthropic/serving_messages.py +460 -0
  190. vllm/entrypoints/api_server.py +184 -0
  191. vllm/entrypoints/chat_utils.py +1690 -0
  192. vllm/entrypoints/cli/__init__.py +13 -0
  193. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  194. vllm/entrypoints/cli/benchmark/base.py +25 -0
  195. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  196. vllm/entrypoints/cli/benchmark/main.py +56 -0
  197. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  198. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  199. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  200. vllm/entrypoints/cli/collect_env.py +38 -0
  201. vllm/entrypoints/cli/main.py +79 -0
  202. vllm/entrypoints/cli/openai.py +256 -0
  203. vllm/entrypoints/cli/run_batch.py +68 -0
  204. vllm/entrypoints/cli/serve.py +249 -0
  205. vllm/entrypoints/cli/types.py +29 -0
  206. vllm/entrypoints/constants.py +10 -0
  207. vllm/entrypoints/context.py +572 -0
  208. vllm/entrypoints/dynamic_lora.py +57 -0
  209. vllm/entrypoints/harmony_utils.py +535 -0
  210. vllm/entrypoints/launcher.py +175 -0
  211. vllm/entrypoints/llm.py +1768 -0
  212. vllm/entrypoints/logger.py +84 -0
  213. vllm/entrypoints/openai/__init__.py +0 -0
  214. vllm/entrypoints/openai/api_server.py +2096 -0
  215. vllm/entrypoints/openai/cli_args.py +302 -0
  216. vllm/entrypoints/openai/orca_metrics.py +120 -0
  217. vllm/entrypoints/openai/protocol.py +3299 -0
  218. vllm/entrypoints/openai/run_batch.py +547 -0
  219. vllm/entrypoints/openai/serving_chat.py +1772 -0
  220. vllm/entrypoints/openai/serving_classification.py +235 -0
  221. vllm/entrypoints/openai/serving_completion.py +715 -0
  222. vllm/entrypoints/openai/serving_embedding.py +695 -0
  223. vllm/entrypoints/openai/serving_engine.py +1433 -0
  224. vllm/entrypoints/openai/serving_models.py +304 -0
  225. vllm/entrypoints/openai/serving_pooling.py +346 -0
  226. vllm/entrypoints/openai/serving_responses.py +2021 -0
  227. vllm/entrypoints/openai/serving_score.py +503 -0
  228. vllm/entrypoints/openai/serving_tokenization.py +203 -0
  229. vllm/entrypoints/openai/serving_tokens.py +269 -0
  230. vllm/entrypoints/openai/serving_transcription.py +148 -0
  231. vllm/entrypoints/openai/speech_to_text.py +405 -0
  232. vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
  233. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
  234. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
  235. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
  236. vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
  237. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
  238. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  239. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
  240. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
  241. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  242. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
  243. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
  244. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
  245. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  246. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
  247. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
  248. vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
  249. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
  250. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
  251. vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
  252. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
  253. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
  254. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
  255. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
  256. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  257. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
  258. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
  259. vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
  260. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
  261. vllm/entrypoints/renderer.py +409 -0
  262. vllm/entrypoints/responses_utils.py +77 -0
  263. vllm/entrypoints/sagemaker/__init__.py +4 -0
  264. vllm/entrypoints/sagemaker/routes.py +72 -0
  265. vllm/entrypoints/score_utils.py +242 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +143 -0
  268. vllm/entrypoints/tool_server.py +209 -0
  269. vllm/entrypoints/utils.py +319 -0
  270. vllm/env_override.py +378 -0
  271. vllm/envs.py +1659 -0
  272. vllm/forward_context.py +356 -0
  273. vllm/inputs/__init__.py +44 -0
  274. vllm/inputs/data.py +359 -0
  275. vllm/inputs/parse.py +137 -0
  276. vllm/inputs/preprocess.py +727 -0
  277. vllm/logger.py +267 -0
  278. vllm/logging_utils/__init__.py +10 -0
  279. vllm/logging_utils/dump_input.py +83 -0
  280. vllm/logging_utils/formatter.py +77 -0
  281. vllm/logging_utils/log_time.py +34 -0
  282. vllm/logits_process.py +121 -0
  283. vllm/logprobs.py +208 -0
  284. vllm/lora/__init__.py +0 -0
  285. vllm/lora/layers/__init__.py +41 -0
  286. vllm/lora/layers/base.py +67 -0
  287. vllm/lora/layers/base_linear.py +164 -0
  288. vllm/lora/layers/column_parallel_linear.py +578 -0
  289. vllm/lora/layers/fused_moe.py +472 -0
  290. vllm/lora/layers/logits_processor.py +252 -0
  291. vllm/lora/layers/replicated_linear.py +70 -0
  292. vllm/lora/layers/row_parallel_linear.py +181 -0
  293. vllm/lora/layers/utils.py +65 -0
  294. vllm/lora/layers/vocal_parallel_embedding.py +166 -0
  295. vllm/lora/lora_weights.py +198 -0
  296. vllm/lora/models.py +890 -0
  297. vllm/lora/ops/__init__.py +0 -0
  298. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  299. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  300. vllm/lora/ops/torch_ops/__init__.py +20 -0
  301. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  302. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  303. vllm/lora/ops/triton_ops/__init__.py +21 -0
  304. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
  305. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  306. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  307. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  308. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  309. vllm/lora/ops/triton_ops/utils.py +295 -0
  310. vllm/lora/ops/xla_ops/__init__.py +6 -0
  311. vllm/lora/ops/xla_ops/lora_ops.py +141 -0
  312. vllm/lora/peft_helper.py +128 -0
  313. vllm/lora/punica_wrapper/__init__.py +10 -0
  314. vllm/lora/punica_wrapper/punica_base.py +492 -0
  315. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  316. vllm/lora/punica_wrapper/punica_gpu.py +411 -0
  317. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  318. vllm/lora/punica_wrapper/punica_tpu.py +359 -0
  319. vllm/lora/punica_wrapper/punica_xpu.py +279 -0
  320. vllm/lora/punica_wrapper/utils.py +150 -0
  321. vllm/lora/request.py +100 -0
  322. vllm/lora/resolver.py +88 -0
  323. vllm/lora/utils.py +293 -0
  324. vllm/lora/worker_manager.py +279 -0
  325. vllm/model_executor/__init__.py +11 -0
  326. vllm/model_executor/custom_op.py +194 -0
  327. vllm/model_executor/layers/__init__.py +0 -0
  328. vllm/model_executor/layers/activation.py +569 -0
  329. vllm/model_executor/layers/attention_layer_base.py +35 -0
  330. vllm/model_executor/layers/batch_invariant.py +854 -0
  331. vllm/model_executor/layers/conv.py +236 -0
  332. vllm/model_executor/layers/fla/__init__.py +8 -0
  333. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  334. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  335. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  336. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  337. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  338. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  339. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  340. vllm/model_executor/layers/fla/ops/index.py +41 -0
  341. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  342. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  343. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  344. vllm/model_executor/layers/fla/ops/op.py +60 -0
  345. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  346. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  347. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  348. vllm/model_executor/layers/fused_moe/__init__.py +106 -0
  349. vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
  350. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
  351. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
  352. vllm/model_executor/layers/fused_moe/config.py +916 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  625. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
  626. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
  627. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
  628. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
  629. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  630. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
  631. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
  632. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
  633. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  634. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
  635. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
  636. vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
  637. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
  638. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
  639. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
  640. vllm/model_executor/layers/fused_moe/layer.py +1944 -0
  641. vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
  642. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
  643. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  644. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  645. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  646. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  647. vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
  648. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
  649. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  650. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
  651. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  652. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
  653. vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
  654. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
  655. vllm/model_executor/layers/fused_moe/utils.py +332 -0
  656. vllm/model_executor/layers/kda.py +448 -0
  657. vllm/model_executor/layers/layernorm.py +442 -0
  658. vllm/model_executor/layers/lightning_attn.py +729 -0
  659. vllm/model_executor/layers/linear.py +1424 -0
  660. vllm/model_executor/layers/logits_processor.py +106 -0
  661. vllm/model_executor/layers/mamba/__init__.py +0 -0
  662. vllm/model_executor/layers/mamba/abstract.py +71 -0
  663. vllm/model_executor/layers/mamba/linear_attn.py +402 -0
  664. vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
  665. vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
  666. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  667. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  668. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  669. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  670. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
  671. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  672. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  673. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  674. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  675. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  676. vllm/model_executor/layers/mamba/short_conv.py +264 -0
  677. vllm/model_executor/layers/mla.py +168 -0
  678. vllm/model_executor/layers/pooler.py +817 -0
  679. vllm/model_executor/layers/quantization/__init__.py +174 -0
  680. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  681. vllm/model_executor/layers/quantization/awq.py +277 -0
  682. vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
  683. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  684. vllm/model_executor/layers/quantization/base_config.py +170 -0
  685. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  686. vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
  687. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  688. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
  689. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
  690. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
  691. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  692. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  693. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  694. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  695. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  696. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
  697. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  698. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  699. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
  700. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  701. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
  702. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  703. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  704. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  705. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  706. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  707. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  708. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  709. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  710. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  711. vllm/model_executor/layers/quantization/experts_int8.py +240 -0
  712. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  713. vllm/model_executor/layers/quantization/fp8.py +1333 -0
  714. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  715. vllm/model_executor/layers/quantization/gguf.py +643 -0
  716. vllm/model_executor/layers/quantization/gptq.py +393 -0
  717. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  718. vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
  719. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  720. vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
  721. vllm/model_executor/layers/quantization/inc.py +65 -0
  722. vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
  723. vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
  724. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  725. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  726. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
  727. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  728. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  729. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  730. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
  731. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  732. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
  733. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  734. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
  735. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
  736. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
  737. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  738. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
  739. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
  740. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
  741. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  742. vllm/model_executor/layers/quantization/kv_cache.py +146 -0
  743. vllm/model_executor/layers/quantization/modelopt.py +1788 -0
  744. vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
  745. vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
  746. vllm/model_executor/layers/quantization/petit.py +320 -0
  747. vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
  748. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  749. vllm/model_executor/layers/quantization/quark/quark.py +528 -0
  750. vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
  751. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  752. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
  753. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  754. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  755. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  756. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  757. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  758. vllm/model_executor/layers/quantization/rtn.py +652 -0
  759. vllm/model_executor/layers/quantization/schema.py +90 -0
  760. vllm/model_executor/layers/quantization/torchao.py +380 -0
  761. vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
  762. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  763. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  764. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  976. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
  977. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
  978. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
  979. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  980. vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
  981. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  982. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  983. vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
  984. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
  985. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
  986. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
  987. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  988. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
  989. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  990. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  991. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  992. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
  993. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  994. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  995. vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
  996. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
  997. vllm/model_executor/layers/resampler.py +283 -0
  998. vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
  999. vllm/model_executor/layers/rotary_embedding/base.py +235 -0
  1000. vllm/model_executor/layers/rotary_embedding/common.py +188 -0
  1001. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
  1002. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
  1003. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1004. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1005. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
  1006. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1007. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1008. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
  1009. vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
  1010. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1011. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1012. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
  1013. vllm/model_executor/layers/utils.py +251 -0
  1014. vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
  1015. vllm/model_executor/model_loader/__init__.py +148 -0
  1016. vllm/model_executor/model_loader/base_loader.py +57 -0
  1017. vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
  1018. vllm/model_executor/model_loader/default_loader.py +327 -0
  1019. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1020. vllm/model_executor/model_loader/gguf_loader.py +176 -0
  1021. vllm/model_executor/model_loader/online_quantization.py +224 -0
  1022. vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
  1023. vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
  1024. vllm/model_executor/model_loader/tensorizer.py +790 -0
  1025. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1026. vllm/model_executor/model_loader/tpu.py +118 -0
  1027. vllm/model_executor/model_loader/utils.py +288 -0
  1028. vllm/model_executor/model_loader/weight_utils.py +1084 -0
  1029. vllm/model_executor/models/__init__.py +44 -0
  1030. vllm/model_executor/models/adapters.py +543 -0
  1031. vllm/model_executor/models/afmoe.py +711 -0
  1032. vllm/model_executor/models/aimv2.py +247 -0
  1033. vllm/model_executor/models/apertus.py +587 -0
  1034. vllm/model_executor/models/arcee.py +439 -0
  1035. vllm/model_executor/models/arctic.py +635 -0
  1036. vllm/model_executor/models/aria.py +655 -0
  1037. vllm/model_executor/models/aya_vision.py +450 -0
  1038. vllm/model_executor/models/baichuan.py +496 -0
  1039. vllm/model_executor/models/bailing_moe.py +646 -0
  1040. vllm/model_executor/models/bamba.py +522 -0
  1041. vllm/model_executor/models/bee.py +157 -0
  1042. vllm/model_executor/models/bert.py +925 -0
  1043. vllm/model_executor/models/bert_with_rope.py +732 -0
  1044. vllm/model_executor/models/blip.py +349 -0
  1045. vllm/model_executor/models/blip2.py +695 -0
  1046. vllm/model_executor/models/bloom.py +390 -0
  1047. vllm/model_executor/models/chameleon.py +1120 -0
  1048. vllm/model_executor/models/chatglm.py +498 -0
  1049. vllm/model_executor/models/clip.py +965 -0
  1050. vllm/model_executor/models/cohere2_vision.py +472 -0
  1051. vllm/model_executor/models/commandr.py +473 -0
  1052. vllm/model_executor/models/config.py +503 -0
  1053. vllm/model_executor/models/dbrx.py +482 -0
  1054. vllm/model_executor/models/deepencoder.py +673 -0
  1055. vllm/model_executor/models/deepseek_eagle.py +260 -0
  1056. vllm/model_executor/models/deepseek_mtp.py +360 -0
  1057. vllm/model_executor/models/deepseek_ocr.py +593 -0
  1058. vllm/model_executor/models/deepseek_v2.py +1649 -0
  1059. vllm/model_executor/models/deepseek_vl2.py +655 -0
  1060. vllm/model_executor/models/dots1.py +574 -0
  1061. vllm/model_executor/models/dots_ocr.py +900 -0
  1062. vllm/model_executor/models/ernie45.py +53 -0
  1063. vllm/model_executor/models/ernie45_moe.py +759 -0
  1064. vllm/model_executor/models/ernie45_vl.py +1742 -0
  1065. vllm/model_executor/models/ernie45_vl_moe.py +803 -0
  1066. vllm/model_executor/models/ernie_mtp.py +279 -0
  1067. vllm/model_executor/models/exaone.py +545 -0
  1068. vllm/model_executor/models/exaone4.py +531 -0
  1069. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1070. vllm/model_executor/models/falcon.py +545 -0
  1071. vllm/model_executor/models/falcon_h1.py +685 -0
  1072. vllm/model_executor/models/flex_olmo.py +155 -0
  1073. vllm/model_executor/models/fuyu.py +373 -0
  1074. vllm/model_executor/models/gemma.py +426 -0
  1075. vllm/model_executor/models/gemma2.py +439 -0
  1076. vllm/model_executor/models/gemma3.py +571 -0
  1077. vllm/model_executor/models/gemma3_mm.py +741 -0
  1078. vllm/model_executor/models/gemma3n.py +1165 -0
  1079. vllm/model_executor/models/gemma3n_mm.py +811 -0
  1080. vllm/model_executor/models/glm.py +23 -0
  1081. vllm/model_executor/models/glm4.py +305 -0
  1082. vllm/model_executor/models/glm4_1v.py +1821 -0
  1083. vllm/model_executor/models/glm4_moe.py +747 -0
  1084. vllm/model_executor/models/glm4_moe_mtp.py +359 -0
  1085. vllm/model_executor/models/glm4v.py +784 -0
  1086. vllm/model_executor/models/gpt2.py +397 -0
  1087. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1088. vllm/model_executor/models/gpt_j.py +346 -0
  1089. vllm/model_executor/models/gpt_neox.py +344 -0
  1090. vllm/model_executor/models/gpt_oss.py +738 -0
  1091. vllm/model_executor/models/granite.py +516 -0
  1092. vllm/model_executor/models/granite_speech.py +913 -0
  1093. vllm/model_executor/models/granitemoe.py +569 -0
  1094. vllm/model_executor/models/granitemoehybrid.py +709 -0
  1095. vllm/model_executor/models/granitemoeshared.py +333 -0
  1096. vllm/model_executor/models/gritlm.py +245 -0
  1097. vllm/model_executor/models/grok1.py +558 -0
  1098. vllm/model_executor/models/h2ovl.py +554 -0
  1099. vllm/model_executor/models/hunyuan_v1.py +1053 -0
  1100. vllm/model_executor/models/hyperclovax_vision.py +1166 -0
  1101. vllm/model_executor/models/idefics2_vision_model.py +426 -0
  1102. vllm/model_executor/models/idefics3.py +717 -0
  1103. vllm/model_executor/models/interfaces.py +1092 -0
  1104. vllm/model_executor/models/interfaces_base.py +214 -0
  1105. vllm/model_executor/models/intern_vit.py +453 -0
  1106. vllm/model_executor/models/internlm2.py +460 -0
  1107. vllm/model_executor/models/internlm2_ve.py +142 -0
  1108. vllm/model_executor/models/interns1.py +830 -0
  1109. vllm/model_executor/models/interns1_vit.py +432 -0
  1110. vllm/model_executor/models/internvl.py +1452 -0
  1111. vllm/model_executor/models/jais.py +397 -0
  1112. vllm/model_executor/models/jamba.py +610 -0
  1113. vllm/model_executor/models/jina_vl.py +147 -0
  1114. vllm/model_executor/models/keye.py +1761 -0
  1115. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1116. vllm/model_executor/models/kimi_linear.py +663 -0
  1117. vllm/model_executor/models/kimi_vl.py +578 -0
  1118. vllm/model_executor/models/lfm2.py +532 -0
  1119. vllm/model_executor/models/lfm2_moe.py +762 -0
  1120. vllm/model_executor/models/lightonocr.py +195 -0
  1121. vllm/model_executor/models/llama.py +732 -0
  1122. vllm/model_executor/models/llama4.py +859 -0
  1123. vllm/model_executor/models/llama4_eagle.py +223 -0
  1124. vllm/model_executor/models/llama_eagle.py +218 -0
  1125. vllm/model_executor/models/llama_eagle3.py +367 -0
  1126. vllm/model_executor/models/llava.py +842 -0
  1127. vllm/model_executor/models/llava_next.py +583 -0
  1128. vllm/model_executor/models/llava_next_video.py +467 -0
  1129. vllm/model_executor/models/llava_onevision.py +923 -0
  1130. vllm/model_executor/models/longcat_flash.py +749 -0
  1131. vllm/model_executor/models/longcat_flash_mtp.py +349 -0
  1132. vllm/model_executor/models/mamba.py +276 -0
  1133. vllm/model_executor/models/mamba2.py +289 -0
  1134. vllm/model_executor/models/medusa.py +179 -0
  1135. vllm/model_executor/models/midashenglm.py +827 -0
  1136. vllm/model_executor/models/mimo.py +188 -0
  1137. vllm/model_executor/models/mimo_mtp.py +294 -0
  1138. vllm/model_executor/models/minicpm.py +664 -0
  1139. vllm/model_executor/models/minicpm3.py +242 -0
  1140. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1141. vllm/model_executor/models/minicpmo.py +768 -0
  1142. vllm/model_executor/models/minicpmv.py +1745 -0
  1143. vllm/model_executor/models/minimax_m2.py +552 -0
  1144. vllm/model_executor/models/minimax_text_01.py +1012 -0
  1145. vllm/model_executor/models/minimax_vl_01.py +396 -0
  1146. vllm/model_executor/models/mistral3.py +637 -0
  1147. vllm/model_executor/models/mixtral.py +621 -0
  1148. vllm/model_executor/models/mllama4.py +1147 -0
  1149. vllm/model_executor/models/mlp_speculator.py +235 -0
  1150. vllm/model_executor/models/modernbert.py +450 -0
  1151. vllm/model_executor/models/module_mapping.py +74 -0
  1152. vllm/model_executor/models/molmo.py +1555 -0
  1153. vllm/model_executor/models/moonvit.py +677 -0
  1154. vllm/model_executor/models/mpt.py +335 -0
  1155. vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
  1156. vllm/model_executor/models/nemotron.py +518 -0
  1157. vllm/model_executor/models/nemotron_h.py +852 -0
  1158. vllm/model_executor/models/nemotron_nas.py +491 -0
  1159. vllm/model_executor/models/nemotron_vl.py +653 -0
  1160. vllm/model_executor/models/nvlm_d.py +216 -0
  1161. vllm/model_executor/models/olmo.py +414 -0
  1162. vllm/model_executor/models/olmo2.py +454 -0
  1163. vllm/model_executor/models/olmoe.py +498 -0
  1164. vllm/model_executor/models/openpangu.py +1062 -0
  1165. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1166. vllm/model_executor/models/opt.py +426 -0
  1167. vllm/model_executor/models/orion.py +372 -0
  1168. vllm/model_executor/models/ouro.py +516 -0
  1169. vllm/model_executor/models/ovis.py +559 -0
  1170. vllm/model_executor/models/ovis2_5.py +673 -0
  1171. vllm/model_executor/models/paddleocr_vl.py +1407 -0
  1172. vllm/model_executor/models/paligemma.py +412 -0
  1173. vllm/model_executor/models/persimmon.py +377 -0
  1174. vllm/model_executor/models/phi.py +374 -0
  1175. vllm/model_executor/models/phi3.py +18 -0
  1176. vllm/model_executor/models/phi3v.py +737 -0
  1177. vllm/model_executor/models/phi4_multimodal.py +1447 -0
  1178. vllm/model_executor/models/phi4mm.py +1253 -0
  1179. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1180. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1181. vllm/model_executor/models/phimoe.py +675 -0
  1182. vllm/model_executor/models/pixtral.py +1352 -0
  1183. vllm/model_executor/models/plamo2.py +981 -0
  1184. vllm/model_executor/models/qwen.py +368 -0
  1185. vllm/model_executor/models/qwen2.py +541 -0
  1186. vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
  1187. vllm/model_executor/models/qwen2_5_vl.py +1613 -0
  1188. vllm/model_executor/models/qwen2_audio.py +473 -0
  1189. vllm/model_executor/models/qwen2_moe.py +596 -0
  1190. vllm/model_executor/models/qwen2_rm.py +123 -0
  1191. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1192. vllm/model_executor/models/qwen3.py +336 -0
  1193. vllm/model_executor/models/qwen3_moe.py +744 -0
  1194. vllm/model_executor/models/qwen3_next.py +1395 -0
  1195. vllm/model_executor/models/qwen3_next_mtp.py +296 -0
  1196. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
  1197. vllm/model_executor/models/qwen3_vl.py +1673 -0
  1198. vllm/model_executor/models/qwen3_vl_moe.py +415 -0
  1199. vllm/model_executor/models/qwen_vl.py +802 -0
  1200. vllm/model_executor/models/radio.py +555 -0
  1201. vllm/model_executor/models/registry.py +1155 -0
  1202. vllm/model_executor/models/roberta.py +259 -0
  1203. vllm/model_executor/models/rvl.py +107 -0
  1204. vllm/model_executor/models/seed_oss.py +497 -0
  1205. vllm/model_executor/models/siglip.py +1174 -0
  1206. vllm/model_executor/models/siglip2navit.py +724 -0
  1207. vllm/model_executor/models/skyworkr1v.py +953 -0
  1208. vllm/model_executor/models/smolvlm.py +38 -0
  1209. vllm/model_executor/models/solar.py +502 -0
  1210. vllm/model_executor/models/stablelm.py +359 -0
  1211. vllm/model_executor/models/starcoder2.py +367 -0
  1212. vllm/model_executor/models/step3_text.py +559 -0
  1213. vllm/model_executor/models/step3_vl.py +1148 -0
  1214. vllm/model_executor/models/swin.py +514 -0
  1215. vllm/model_executor/models/tarsier.py +619 -0
  1216. vllm/model_executor/models/telechat2.py +153 -0
  1217. vllm/model_executor/models/teleflm.py +78 -0
  1218. vllm/model_executor/models/terratorch.py +319 -0
  1219. vllm/model_executor/models/transformers/__init__.py +127 -0
  1220. vllm/model_executor/models/transformers/base.py +464 -0
  1221. vllm/model_executor/models/transformers/causal.py +65 -0
  1222. vllm/model_executor/models/transformers/legacy.py +90 -0
  1223. vllm/model_executor/models/transformers/moe.py +318 -0
  1224. vllm/model_executor/models/transformers/multimodal.py +411 -0
  1225. vllm/model_executor/models/transformers/pooling.py +119 -0
  1226. vllm/model_executor/models/transformers/utils.py +207 -0
  1227. vllm/model_executor/models/ultravox.py +681 -0
  1228. vllm/model_executor/models/utils.py +877 -0
  1229. vllm/model_executor/models/vision.py +552 -0
  1230. vllm/model_executor/models/voxtral.py +845 -0
  1231. vllm/model_executor/models/whisper.py +959 -0
  1232. vllm/model_executor/models/zamba2.py +986 -0
  1233. vllm/model_executor/parameter.py +642 -0
  1234. vllm/model_executor/utils.py +94 -0
  1235. vllm/model_executor/warmup/__init__.py +0 -0
  1236. vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
  1237. vllm/model_executor/warmup/kernel_warmup.py +98 -0
  1238. vllm/multimodal/__init__.py +40 -0
  1239. vllm/multimodal/audio.py +118 -0
  1240. vllm/multimodal/base.py +26 -0
  1241. vllm/multimodal/cache.py +755 -0
  1242. vllm/multimodal/evs.py +294 -0
  1243. vllm/multimodal/hasher.py +106 -0
  1244. vllm/multimodal/image.py +130 -0
  1245. vllm/multimodal/inputs.py +1036 -0
  1246. vllm/multimodal/parse.py +544 -0
  1247. vllm/multimodal/processing.py +2186 -0
  1248. vllm/multimodal/profiling.py +369 -0
  1249. vllm/multimodal/registry.py +360 -0
  1250. vllm/multimodal/utils.py +512 -0
  1251. vllm/multimodal/video.py +306 -0
  1252. vllm/outputs.py +345 -0
  1253. vllm/platforms/__init__.py +277 -0
  1254. vllm/platforms/cpu.py +414 -0
  1255. vllm/platforms/cuda.py +657 -0
  1256. vllm/platforms/interface.py +639 -0
  1257. vllm/platforms/rocm.py +466 -0
  1258. vllm/platforms/tpu.py +276 -0
  1259. vllm/platforms/xpu.py +274 -0
  1260. vllm/plugins/__init__.py +78 -0
  1261. vllm/plugins/io_processors/__init__.py +68 -0
  1262. vllm/plugins/io_processors/interface.py +77 -0
  1263. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1264. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1265. vllm/pooling_params.py +228 -0
  1266. vllm/profiler/__init__.py +0 -0
  1267. vllm/profiler/gpu_profiler.py +37 -0
  1268. vllm/profiler/layerwise_profile.py +392 -0
  1269. vllm/profiler/utils.py +151 -0
  1270. vllm/py.typed +2 -0
  1271. vllm/ray/__init__.py +0 -0
  1272. vllm/ray/lazy_utils.py +26 -0
  1273. vllm/ray/ray_env.py +79 -0
  1274. vllm/reasoning/__init__.py +92 -0
  1275. vllm/reasoning/abs_reasoning_parsers.py +290 -0
  1276. vllm/reasoning/basic_parsers.py +162 -0
  1277. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1278. vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
  1279. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1280. vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
  1281. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1282. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1283. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1284. vllm/reasoning/identity_reasoning_parser.py +58 -0
  1285. vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
  1286. vllm/reasoning/mistral_reasoning_parser.py +55 -0
  1287. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1288. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1289. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1290. vllm/reasoning/step3_reasoning_parser.py +107 -0
  1291. vllm/sampling_params.py +669 -0
  1292. vllm/scalar_type.py +355 -0
  1293. vllm/scripts.py +17 -0
  1294. vllm/sequence.py +98 -0
  1295. vllm/tasks.py +13 -0
  1296. vllm/third_party/__init__.py +0 -0
  1297. vllm/third_party/pynvml.py +6140 -0
  1298. vllm/tracing.py +135 -0
  1299. vllm/transformers_utils/__init__.py +26 -0
  1300. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1301. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1302. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1303. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1304. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1305. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1306. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1307. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1308. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1309. vllm/transformers_utils/config.py +1203 -0
  1310. vllm/transformers_utils/config_parser_base.py +20 -0
  1311. vllm/transformers_utils/configs/__init__.py +70 -0
  1312. vllm/transformers_utils/configs/afmoe.py +84 -0
  1313. vllm/transformers_utils/configs/arctic.py +206 -0
  1314. vllm/transformers_utils/configs/chatglm.py +75 -0
  1315. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1316. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1317. vllm/transformers_utils/configs/eagle.py +84 -0
  1318. vllm/transformers_utils/configs/falcon.py +89 -0
  1319. vllm/transformers_utils/configs/flex_olmo.py +77 -0
  1320. vllm/transformers_utils/configs/jais.py +243 -0
  1321. vllm/transformers_utils/configs/kimi_linear.py +144 -0
  1322. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1323. vllm/transformers_utils/configs/lfm2_moe.py +159 -0
  1324. vllm/transformers_utils/configs/medusa.py +65 -0
  1325. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1326. vllm/transformers_utils/configs/mistral.py +174 -0
  1327. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1328. vllm/transformers_utils/configs/moonvit.py +33 -0
  1329. vllm/transformers_utils/configs/nemotron.py +212 -0
  1330. vllm/transformers_utils/configs/nemotron_h.py +282 -0
  1331. vllm/transformers_utils/configs/olmo3.py +79 -0
  1332. vllm/transformers_utils/configs/ovis.py +182 -0
  1333. vllm/transformers_utils/configs/qwen3_next.py +274 -0
  1334. vllm/transformers_utils/configs/radio.py +89 -0
  1335. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1336. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1337. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1338. vllm/transformers_utils/configs/step3_vl.py +174 -0
  1339. vllm/transformers_utils/configs/ultravox.py +118 -0
  1340. vllm/transformers_utils/detokenizer_utils.py +198 -0
  1341. vllm/transformers_utils/dynamic_module.py +59 -0
  1342. vllm/transformers_utils/processor.py +402 -0
  1343. vllm/transformers_utils/processors/__init__.py +15 -0
  1344. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1345. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1346. vllm/transformers_utils/processors/ovis.py +453 -0
  1347. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1348. vllm/transformers_utils/runai_utils.py +104 -0
  1349. vllm/transformers_utils/s3_utils.py +95 -0
  1350. vllm/transformers_utils/tokenizer.py +293 -0
  1351. vllm/transformers_utils/tokenizer_base.py +155 -0
  1352. vllm/transformers_utils/tokenizers/__init__.py +16 -0
  1353. vllm/transformers_utils/tokenizers/mistral.py +502 -0
  1354. vllm/transformers_utils/utils.py +130 -0
  1355. vllm/triton_utils/__init__.py +19 -0
  1356. vllm/triton_utils/importing.py +103 -0
  1357. vllm/usage/__init__.py +0 -0
  1358. vllm/usage/usage_lib.py +294 -0
  1359. vllm/utils/__init__.py +82 -0
  1360. vllm/utils/argparse_utils.py +487 -0
  1361. vllm/utils/async_utils.py +303 -0
  1362. vllm/utils/cache.py +214 -0
  1363. vllm/utils/collection_utils.py +139 -0
  1364. vllm/utils/counter.py +45 -0
  1365. vllm/utils/deep_gemm.py +391 -0
  1366. vllm/utils/flashinfer.py +490 -0
  1367. vllm/utils/func_utils.py +236 -0
  1368. vllm/utils/gc_utils.py +147 -0
  1369. vllm/utils/hashing.py +63 -0
  1370. vllm/utils/import_utils.py +411 -0
  1371. vllm/utils/jsontree.py +165 -0
  1372. vllm/utils/math_utils.py +32 -0
  1373. vllm/utils/mem_constants.py +13 -0
  1374. vllm/utils/mem_utils.py +232 -0
  1375. vllm/utils/nccl.py +64 -0
  1376. vllm/utils/network_utils.py +331 -0
  1377. vllm/utils/platform_utils.py +59 -0
  1378. vllm/utils/profiling.py +56 -0
  1379. vllm/utils/registry.py +49 -0
  1380. vllm/utils/serial_utils.py +169 -0
  1381. vllm/utils/system_utils.py +229 -0
  1382. vllm/utils/tensor_schema.py +255 -0
  1383. vllm/utils/torch_utils.py +657 -0
  1384. vllm/v1/__init__.py +0 -0
  1385. vllm/v1/attention/__init__.py +0 -0
  1386. vllm/v1/attention/backends/__init__.py +0 -0
  1387. vllm/v1/attention/backends/cpu_attn.py +496 -0
  1388. vllm/v1/attention/backends/flash_attn.py +1028 -0
  1389. vllm/v1/attention/backends/flashinfer.py +1572 -0
  1390. vllm/v1/attention/backends/flex_attention.py +926 -0
  1391. vllm/v1/attention/backends/gdn_attn.py +387 -0
  1392. vllm/v1/attention/backends/linear_attn.py +74 -0
  1393. vllm/v1/attention/backends/mamba1_attn.py +165 -0
  1394. vllm/v1/attention/backends/mamba2_attn.py +354 -0
  1395. vllm/v1/attention/backends/mamba_attn.py +115 -0
  1396. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1397. vllm/v1/attention/backends/mla/common.py +2031 -0
  1398. vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
  1399. vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
  1400. vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
  1401. vllm/v1/attention/backends/mla/flashmla.py +314 -0
  1402. vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
  1403. vllm/v1/attention/backends/mla/indexer.py +362 -0
  1404. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
  1405. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1406. vllm/v1/attention/backends/pallas.py +436 -0
  1407. vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
  1408. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
  1409. vllm/v1/attention/backends/rocm_attn.py +362 -0
  1410. vllm/v1/attention/backends/short_conv_attn.py +105 -0
  1411. vllm/v1/attention/backends/tree_attn.py +425 -0
  1412. vllm/v1/attention/backends/triton_attn.py +373 -0
  1413. vllm/v1/attention/backends/utils.py +1116 -0
  1414. vllm/v1/attention/backends/xformers.py +417 -0
  1415. vllm/v1/core/__init__.py +0 -0
  1416. vllm/v1/core/block_pool.py +428 -0
  1417. vllm/v1/core/encoder_cache_manager.py +343 -0
  1418. vllm/v1/core/kv_cache_coordinator.py +480 -0
  1419. vllm/v1/core/kv_cache_manager.py +420 -0
  1420. vllm/v1/core/kv_cache_utils.py +1340 -0
  1421. vllm/v1/core/sched/__init__.py +0 -0
  1422. vllm/v1/core/sched/async_scheduler.py +62 -0
  1423. vllm/v1/core/sched/interface.py +181 -0
  1424. vllm/v1/core/sched/output.py +202 -0
  1425. vllm/v1/core/sched/request_queue.py +221 -0
  1426. vllm/v1/core/sched/scheduler.py +1617 -0
  1427. vllm/v1/core/sched/utils.py +72 -0
  1428. vllm/v1/core/single_type_kv_cache_manager.py +736 -0
  1429. vllm/v1/cudagraph_dispatcher.py +148 -0
  1430. vllm/v1/engine/__init__.py +206 -0
  1431. vllm/v1/engine/async_llm.py +797 -0
  1432. vllm/v1/engine/coordinator.py +377 -0
  1433. vllm/v1/engine/core.py +1420 -0
  1434. vllm/v1/engine/core_client.py +1400 -0
  1435. vllm/v1/engine/detokenizer.py +351 -0
  1436. vllm/v1/engine/exceptions.py +18 -0
  1437. vllm/v1/engine/llm_engine.py +408 -0
  1438. vllm/v1/engine/logprobs.py +182 -0
  1439. vllm/v1/engine/output_processor.py +642 -0
  1440. vllm/v1/engine/parallel_sampling.py +145 -0
  1441. vllm/v1/engine/processor.py +621 -0
  1442. vllm/v1/engine/utils.py +1072 -0
  1443. vllm/v1/executor/__init__.py +6 -0
  1444. vllm/v1/executor/abstract.py +352 -0
  1445. vllm/v1/executor/multiproc_executor.py +877 -0
  1446. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1447. vllm/v1/executor/ray_executor.py +626 -0
  1448. vllm/v1/executor/ray_utils.py +465 -0
  1449. vllm/v1/executor/uniproc_executor.py +183 -0
  1450. vllm/v1/kv_cache_interface.py +403 -0
  1451. vllm/v1/kv_offload/__init__.py +0 -0
  1452. vllm/v1/kv_offload/abstract.py +161 -0
  1453. vllm/v1/kv_offload/arc_manager.py +237 -0
  1454. vllm/v1/kv_offload/backend.py +97 -0
  1455. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1456. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1457. vllm/v1/kv_offload/cpu.py +93 -0
  1458. vllm/v1/kv_offload/factory.py +56 -0
  1459. vllm/v1/kv_offload/lru_manager.py +139 -0
  1460. vllm/v1/kv_offload/mediums.py +39 -0
  1461. vllm/v1/kv_offload/spec.py +62 -0
  1462. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1463. vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
  1464. vllm/v1/kv_offload/worker/worker.py +144 -0
  1465. vllm/v1/metrics/__init__.py +0 -0
  1466. vllm/v1/metrics/loggers.py +1238 -0
  1467. vllm/v1/metrics/prometheus.py +82 -0
  1468. vllm/v1/metrics/ray_wrappers.py +169 -0
  1469. vllm/v1/metrics/reader.py +257 -0
  1470. vllm/v1/metrics/stats.py +420 -0
  1471. vllm/v1/outputs.py +249 -0
  1472. vllm/v1/pool/__init__.py +0 -0
  1473. vllm/v1/pool/metadata.py +82 -0
  1474. vllm/v1/request.py +259 -0
  1475. vllm/v1/sample/__init__.py +0 -0
  1476. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1477. vllm/v1/sample/logits_processor/builtin.py +274 -0
  1478. vllm/v1/sample/logits_processor/interface.py +106 -0
  1479. vllm/v1/sample/logits_processor/state.py +165 -0
  1480. vllm/v1/sample/metadata.py +44 -0
  1481. vllm/v1/sample/ops/__init__.py +0 -0
  1482. vllm/v1/sample/ops/bad_words.py +52 -0
  1483. vllm/v1/sample/ops/logprobs.py +25 -0
  1484. vllm/v1/sample/ops/penalties.py +57 -0
  1485. vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
  1486. vllm/v1/sample/rejection_sampler.py +793 -0
  1487. vllm/v1/sample/sampler.py +316 -0
  1488. vllm/v1/sample/tpu/__init__.py +0 -0
  1489. vllm/v1/sample/tpu/metadata.py +120 -0
  1490. vllm/v1/sample/tpu/sampler.py +215 -0
  1491. vllm/v1/serial_utils.py +532 -0
  1492. vllm/v1/spec_decode/__init__.py +0 -0
  1493. vllm/v1/spec_decode/eagle.py +1225 -0
  1494. vllm/v1/spec_decode/medusa.py +73 -0
  1495. vllm/v1/spec_decode/metadata.py +66 -0
  1496. vllm/v1/spec_decode/metrics.py +224 -0
  1497. vllm/v1/spec_decode/ngram_proposer.py +291 -0
  1498. vllm/v1/spec_decode/suffix_decoding.py +103 -0
  1499. vllm/v1/spec_decode/utils.py +16 -0
  1500. vllm/v1/structured_output/__init__.py +338 -0
  1501. vllm/v1/structured_output/backend_guidance.py +265 -0
  1502. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1503. vllm/v1/structured_output/backend_outlines.py +324 -0
  1504. vllm/v1/structured_output/backend_types.py +136 -0
  1505. vllm/v1/structured_output/backend_xgrammar.py +362 -0
  1506. vllm/v1/structured_output/request.py +94 -0
  1507. vllm/v1/structured_output/utils.py +469 -0
  1508. vllm/v1/utils.py +414 -0
  1509. vllm/v1/worker/__init__.py +0 -0
  1510. vllm/v1/worker/block_table.py +327 -0
  1511. vllm/v1/worker/cpu_model_runner.py +122 -0
  1512. vllm/v1/worker/cpu_worker.py +206 -0
  1513. vllm/v1/worker/dp_utils.py +230 -0
  1514. vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
  1515. vllm/v1/worker/gpu_input_batch.py +975 -0
  1516. vllm/v1/worker/gpu_model_runner.py +5102 -0
  1517. vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
  1518. vllm/v1/worker/gpu_worker.py +894 -0
  1519. vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
  1520. vllm/v1/worker/lora_model_runner_mixin.py +213 -0
  1521. vllm/v1/worker/tpu_input_batch.py +593 -0
  1522. vllm/v1/worker/tpu_model_runner.py +2173 -0
  1523. vllm/v1/worker/tpu_worker.py +355 -0
  1524. vllm/v1/worker/ubatch_utils.py +73 -0
  1525. vllm/v1/worker/ubatching.py +231 -0
  1526. vllm/v1/worker/utils.py +366 -0
  1527. vllm/v1/worker/worker_base.py +375 -0
  1528. vllm/v1/worker/xpu_model_runner.py +55 -0
  1529. vllm/v1/worker/xpu_worker.py +189 -0
  1530. vllm/version.py +39 -0
  1531. vllm/vllm_flash_attn/.gitkeep +0 -0
  1532. vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
  1533. vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
  1534. vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
  1535. vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
  1536. vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2144 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import argparse
5
+ import copy
6
+ import dataclasses
7
+ import functools
8
+ import json
9
+ import sys
10
+ from collections.abc import Callable
11
+ from dataclasses import MISSING, dataclass, fields, is_dataclass
12
+ from itertools import permutations
13
+ from types import UnionType
14
+ from typing import (
15
+ TYPE_CHECKING,
16
+ Annotated,
17
+ Any,
18
+ Literal,
19
+ TypeAlias,
20
+ TypeVar,
21
+ Union,
22
+ cast,
23
+ get_args,
24
+ get_origin,
25
+ )
26
+
27
+ import huggingface_hub
28
+ import regex as re
29
+ import torch
30
+ from pydantic import TypeAdapter, ValidationError
31
+ from pydantic.fields import FieldInfo
32
+ from typing_extensions import TypeIs, deprecated
33
+
34
+ import vllm.envs as envs
35
+ from vllm.attention.backends.registry import AttentionBackendEnum
36
+ from vllm.config import (
37
+ CacheConfig,
38
+ CompilationConfig,
39
+ ConfigType,
40
+ DeviceConfig,
41
+ ECTransferConfig,
42
+ EPLBConfig,
43
+ KVEventsConfig,
44
+ KVTransferConfig,
45
+ LoadConfig,
46
+ LoRAConfig,
47
+ ModelConfig,
48
+ MultiModalConfig,
49
+ ObservabilityConfig,
50
+ ParallelConfig,
51
+ PoolerConfig,
52
+ SchedulerConfig,
53
+ SpeculativeConfig,
54
+ StructuredOutputsConfig,
55
+ VllmConfig,
56
+ get_attr_docs,
57
+ )
58
+ from vllm.config.cache import (
59
+ BlockSize,
60
+ CacheDType,
61
+ KVOffloadingBackend,
62
+ MambaDType,
63
+ PrefixCachingHashAlgo,
64
+ )
65
+ from vllm.config.device import Device
66
+ from vllm.config.model import (
67
+ ConvertOption,
68
+ HfOverrides,
69
+ LogprobsMode,
70
+ ModelDType,
71
+ RunnerOption,
72
+ TaskOption,
73
+ TokenizerMode,
74
+ )
75
+ from vllm.config.multimodal import MMCacheType, MMEncoderTPMode
76
+ from vllm.config.observability import DetailedTraceModules
77
+ from vllm.config.parallel import DistributedExecutorBackend, ExpertPlacementStrategy
78
+ from vllm.config.scheduler import SchedulerPolicy
79
+ from vllm.config.utils import get_field
80
+ from vllm.logger import init_logger
81
+ from vllm.platforms import CpuArchEnum, current_platform
82
+ from vllm.plugins import load_general_plugins
83
+ from vllm.ray.lazy_utils import is_in_ray_actor, is_ray_initialized
84
+ from vllm.transformers_utils.config import (
85
+ get_model_path,
86
+ is_interleaved,
87
+ maybe_override_with_speculators,
88
+ )
89
+ from vllm.transformers_utils.utils import check_gguf_file, is_cloud_storage
90
+ from vllm.utils.argparse_utils import FlexibleArgumentParser
91
+ from vllm.utils.mem_constants import GiB_bytes
92
+ from vllm.utils.network_utils import get_ip
93
+ from vllm.v1.sample.logits_processor import LogitsProcessor
94
+
95
+ if TYPE_CHECKING:
96
+ from vllm.model_executor.layers.quantization import QuantizationMethods
97
+ from vllm.model_executor.model_loader import LoadFormats
98
+ from vllm.usage.usage_lib import UsageContext
99
+ from vllm.v1.executor import Executor
100
+ else:
101
+ Executor = Any
102
+ QuantizationMethods = Any
103
+ LoadFormats = Any
104
+ UsageContext = Any
105
+
106
+ logger = init_logger(__name__)
107
+
108
+ # object is used to allow for special typing forms
109
+ T = TypeVar("T")
110
+ TypeHint: TypeAlias = type[Any] | object
111
+ TypeHintT: TypeAlias = type[T] | object
112
+
113
+
114
+ def parse_type(return_type: Callable[[str], T]) -> Callable[[str], T]:
115
+ def _parse_type(val: str) -> T:
116
+ try:
117
+ return return_type(val)
118
+ except ValueError as e:
119
+ raise argparse.ArgumentTypeError(
120
+ f"Value {val} cannot be converted to {return_type}."
121
+ ) from e
122
+
123
+ return _parse_type
124
+
125
+
126
+ def optional_type(return_type: Callable[[str], T]) -> Callable[[str], T | None]:
127
+ def _optional_type(val: str) -> T | None:
128
+ if val == "" or val == "None":
129
+ return None
130
+ return parse_type(return_type)(val)
131
+
132
+ return _optional_type
133
+
134
+
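For illustration, a minimal, self-contained sketch of how `parse_type` and `optional_type` are used as argparse `type=` callables: conversion failures surface as `ArgumentTypeError`, and the strings ""/"None" map to `None`. The helper bodies below mirror the definitions above; the flag name is only an example.

    import argparse

    def parse_type(fn):
        def _parse(val):
            try:
                return fn(val)
            except ValueError as e:
                # argparse renders ArgumentTypeError as a clean usage error
                raise argparse.ArgumentTypeError(f"Value {val!r} cannot be converted") from e
        return _parse

    def optional_type(fn):
        def _optional(val):
            return None if val in ("", "None") else parse_type(fn)(val)
        return _optional

    parser = argparse.ArgumentParser()
    parser.add_argument("--max-model-len", type=optional_type(int), default=None)
    print(parser.parse_args(["--max-model-len", "4096"]))  # Namespace(max_model_len=4096)
    print(parser.parse_args(["--max-model-len", "None"]))  # Namespace(max_model_len=None)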
135
+ def union_dict_and_str(val: str) -> str | dict[str, str] | None:
136
+ if not re.match(r"(?s)^\s*{.*}\s*$", val):
137
+ return str(val)
138
+ return optional_type(json.loads)(val)
139
+
140
+
141
+ def is_type(type_hint: TypeHint, type: TypeHintT) -> TypeIs[TypeHintT]:
142
+ """Check if the type hint is a specific type."""
143
+ return type_hint is type or get_origin(type_hint) is type
144
+
145
+
146
+ def contains_type(type_hints: set[TypeHint], type: TypeHintT) -> bool:
147
+ """Check if the type hints contain a specific type."""
148
+ return any(is_type(type_hint, type) for type_hint in type_hints)
149
+
150
+
151
+ def get_type(type_hints: set[TypeHint], type: TypeHintT) -> TypeHintT:
152
+ """Get the specific type from the type hints."""
153
+ return next((th for th in type_hints if is_type(th, type)), None)
154
+
155
+
156
+ def literal_to_kwargs(type_hints: set[TypeHint]) -> dict[str, Any]:
157
+ """Get the `type` and `choices` from a `Literal` type hint in `type_hints`.
158
+
159
+ If `type_hints` also contains `str`, we use `metavar` instead of `choices`.
160
+ """
161
+ type_hint = get_type(type_hints, Literal)
162
+ options = get_args(type_hint)
163
+ option_type = type(options[0])
164
+ if not all(isinstance(option, option_type) for option in options):
165
+ raise ValueError(
166
+ "All options must be of the same type. "
167
+ f"Got {options} with types {[type(c) for c in options]}"
168
+ )
169
+ kwarg = "metavar" if contains_type(type_hints, str) else "choices"
170
+ return {"type": option_type, kwarg: sorted(options)}
171
+
172
+
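A small worked example of the mapping `literal_to_kwargs` performs, assuming the behaviour defined above: a `Literal[...]` hint becomes an argparse `type`/`choices` pair (or `metavar`, when `str` is also among the hints). The option values are illustrative.

    from typing import Literal, get_args

    hint = Literal["auto", "bfloat16", "float16"]  # illustrative dtype-style options
    options = get_args(hint)
    kwargs = {"type": type(options[0]), "choices": sorted(options)}
    print(kwargs)  # {'type': <class 'str'>, 'choices': ['auto', 'bfloat16', 'float16']}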
173
+ def collection_to_kwargs(type_hints: set[TypeHint], type: TypeHint) -> dict[str, Any]:
174
+ type_hint = get_type(type_hints, type)
175
+ types = get_args(type_hint)
176
+ elem_type = types[0]
177
+
178
+ # Handle Ellipsis
179
+ assert all(t is elem_type for t in types if t is not Ellipsis), (
180
+ f"All non-Ellipsis elements must be of the same type. Got {types}."
181
+ )
182
+
183
+ # Handle Union types
184
+ if get_origin(elem_type) in {Union, UnionType}:
185
+ # Union for Union[X, Y] and UnionType for X | Y
186
+ assert str in get_args(elem_type), (
187
+ "If element can have multiple types, one must be 'str' "
188
+ f"(i.e. 'list[int | str]'). Got {elem_type}."
189
+ )
190
+ elem_type = str
191
+
192
+ return {
193
+ "type": elem_type,
194
+ "nargs": "+" if type is not tuple or Ellipsis in types else len(types),
195
+ }
196
+
197
+
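The `nargs` rule above can be sketched in a few lines: a fixed-length `tuple[int, int]` maps to `nargs=2`, while `list[int]` and `tuple[int, ...]` map to `nargs="+"`. This mirrors the branch in `collection_to_kwargs`; it is not the function itself.

    from typing import get_args

    def sketch_nargs(hint, collection_type):
        # mirrors: "+" if type is not tuple or Ellipsis in types else len(types)
        types = get_args(hint)
        return "+" if collection_type is not tuple or Ellipsis in types else len(types)

    print(sketch_nargs(tuple[int, int], tuple))  # 2
    print(sketch_nargs(tuple[int, ...], tuple))  # +
    print(sketch_nargs(list[int], list))         # +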
198
+ def is_not_builtin(type_hint: TypeHint) -> bool:
199
+ """Check if the class is not a built-in type."""
200
+ return type_hint.__module__ != "builtins"
201
+
202
+
203
+ def get_type_hints(type_hint: TypeHint) -> set[TypeHint]:
204
+ """Extract type hints from Annotated or Union type hints."""
205
+ type_hints: set[TypeHint] = set()
206
+ origin = get_origin(type_hint)
207
+ args = get_args(type_hint)
208
+
209
+ if origin is Annotated:
210
+ type_hints.update(get_type_hints(args[0]))
211
+ elif origin in {Union, UnionType}:
212
+ # Union for Union[X, Y] and UnionType for X | Y
213
+ for arg in args:
214
+ type_hints.update(get_type_hints(arg))
215
+ else:
216
+ type_hints.add(type_hint)
217
+
218
+ return type_hints
219
+
220
+
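For intuition, `get_type_hints` above flattens nested `Annotated` and union hints into a set of leaf types. A hand-rolled equivalent of that recursion, written under the same assumptions:

    from types import UnionType
    from typing import Annotated, Union, get_args, get_origin

    def flatten_hints(hint):
        # same recursion as get_type_hints() above
        origin, args = get_origin(hint), get_args(hint)
        if origin is Annotated:
            return flatten_hints(args[0])
        if origin in {Union, UnionType}:
            return {leaf for arg in args for leaf in flatten_hints(arg)}
        return {hint}

    print(flatten_hints(Annotated[int | None, "docs"]))  # {<class 'int'>, <class 'NoneType'>}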
221
+ def is_online_quantization(quantization: Any) -> bool:
222
+ return quantization in ["inc"]
223
+
224
+
225
+ NEEDS_HELP = (
226
+ any("--help" in arg for arg in sys.argv) # vllm SUBCOMMAND --help
227
+ or (argv0 := sys.argv[0]).endswith("mkdocs") # mkdocs SUBCOMMAND
228
+ or argv0.endswith("mkdocs/__main__.py") # python -m mkdocs SUBCOMMAND
229
+ )
230
+
231
+
232
+ @functools.lru_cache(maxsize=30)
233
+ def _compute_kwargs(cls: ConfigType) -> dict[str, dict[str, Any]]:
234
+ # Save time by only getting attr docs when we're generating help text
235
+ cls_docs = get_attr_docs(cls) if NEEDS_HELP else {}
236
+ kwargs = {}
237
+ for field in fields(cls):
238
+ # Get the set of possible types for the field
239
+ type_hints: set[TypeHint] = get_type_hints(field.type)
240
+
241
+ # If the field is a dataclass, we can use the model_validate_json
242
+ generator = (th for th in type_hints if is_dataclass(th))
243
+ dataclass_cls = next(generator, None)
244
+
245
+ # Get the default value of the field
246
+ if field.default is not MISSING:
247
+ default = field.default
248
+ # Handle pydantic.Field defaults
249
+ if isinstance(default, FieldInfo):
250
+ default = (
251
+ default.default
252
+ if default.default_factory is None
253
+ else default.default_factory()
254
+ )
255
+ elif field.default_factory is not MISSING:
256
+ default = field.default_factory()
257
+
258
+ # Get the help text for the field
259
+ name = field.name
260
+ help = cls_docs.get(name, "").strip()
261
+ # Escape % for argparse
262
+ help = help.replace("%", "%%")
263
+
264
+ # Initialise the kwargs dictionary for the field
265
+ kwargs[name] = {"default": default, "help": help}
266
+
267
+ # Set other kwargs based on the type hints
268
+ json_tip = (
269
+ "Should either be a valid JSON string or JSON keys passed individually."
270
+ )
271
+ if dataclass_cls is not None:
272
+
273
+ def parse_dataclass(val: str, cls=dataclass_cls) -> Any:
274
+ try:
275
+ return TypeAdapter(cls).validate_json(val)
276
+ except ValidationError as e:
277
+ raise argparse.ArgumentTypeError(repr(e)) from e
278
+
279
+ kwargs[name]["type"] = parse_dataclass
280
+ kwargs[name]["help"] += f"\n\n{json_tip}"
281
+ elif contains_type(type_hints, bool):
282
+ # Creates --no-<name> and --<name> flags
283
+ kwargs[name]["action"] = argparse.BooleanOptionalAction
284
+ elif contains_type(type_hints, Literal):
285
+ kwargs[name].update(literal_to_kwargs(type_hints))
286
+ elif contains_type(type_hints, tuple):
287
+ kwargs[name].update(collection_to_kwargs(type_hints, tuple))
288
+ elif contains_type(type_hints, list):
289
+ kwargs[name].update(collection_to_kwargs(type_hints, list))
290
+ elif contains_type(type_hints, set):
291
+ kwargs[name].update(collection_to_kwargs(type_hints, set))
292
+ elif contains_type(type_hints, int):
293
+ kwargs[name]["type"] = int
294
+ # Special case for large integers
295
+ human_readable_ints = {
296
+ "max_model_len",
297
+ "max_num_batched_tokens",
298
+ "kv_cache_memory_bytes",
299
+ }
300
+ if name in human_readable_ints:
301
+ kwargs[name]["type"] = human_readable_int
302
+ kwargs[name]["help"] += f"\n\n{human_readable_int.__doc__}"
303
+ elif contains_type(type_hints, float):
304
+ kwargs[name]["type"] = float
305
+ elif contains_type(type_hints, dict) and (
306
+ contains_type(type_hints, str)
307
+ or any(is_not_builtin(th) for th in type_hints)
308
+ ):
309
+ kwargs[name]["type"] = union_dict_and_str
310
+ elif contains_type(type_hints, dict):
311
+ kwargs[name]["type"] = parse_type(json.loads)
312
+ kwargs[name]["help"] += f"\n\n{json_tip}"
313
+ elif contains_type(type_hints, str) or any(
314
+ is_not_builtin(th) for th in type_hints
315
+ ):
316
+ kwargs[name]["type"] = str
317
+ else:
318
+ raise ValueError(f"Unsupported type {type_hints} for argument {name}.")
319
+
320
+ # If the type hint was a sequence of literals, use the helper function
321
+ # to update the type and choices
322
+ if get_origin(kwargs[name].get("type")) is Literal:
323
+ kwargs[name].update(literal_to_kwargs({kwargs[name]["type"]}))
324
+
325
+ # If None is in type_hints, make the argument optional.
326
+ # But not if it's a bool, argparse will handle this better.
327
+ if type(None) in type_hints and not contains_type(type_hints, bool):
328
+ kwargs[name]["type"] = optional_type(kwargs[name]["type"])
329
+ if kwargs[name].get("choices"):
330
+ kwargs[name]["choices"].append("None")
331
+ return kwargs
332
+
333
+
334
+ def get_kwargs(cls: ConfigType) -> dict[str, dict[str, Any]]:
335
+ """Return argparse kwargs for the given Config dataclass.
336
+
337
+ If neither `--help` nor `mkdocs` is present in the command line, the
338
+ attribute documentation will not be included in the help output.
339
+
340
+ The heavy computation is cached via functools.lru_cache, and a deep copy
341
+ is returned so callers can mutate the dictionary without affecting the
342
+ cached version.
343
+ """
344
+ return copy.deepcopy(_compute_kwargs(cls))
345
+
346
+
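A condensed, hand-written version of the field-to-argument mapping that `_compute_kwargs`/`get_kwargs` automate, using a stand-in dataclass rather than a real vLLM config class (the real code also handles Literals, collections, nested dataclasses, pydantic field defaults, and docstring help text):

    import argparse
    from dataclasses import dataclass, fields

    @dataclass
    class TinyCacheConfig:  # stand-in only, not a real vLLM config class
        block_size: int = 16
        enable_prefix_caching: bool = True

    parser = argparse.ArgumentParser()
    for f in fields(TinyCacheConfig):
        flag = "--" + f.name.replace("_", "-")
        if f.type in (bool, "bool"):
            # bools get paired --flag / --no-flag switches, as in _compute_kwargs
            parser.add_argument(flag, action=argparse.BooleanOptionalAction, default=f.default)
        else:
            parser.add_argument(flag, type=f.type, default=f.default)

    print(parser.parse_args(["--block-size", "32", "--no-enable-prefix-caching"]))
    # Namespace(block_size=32, enable_prefix_caching=False)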
347
+ @dataclass
348
+ class EngineArgs:
349
+ """Arguments for vLLM engine."""
350
+
351
+ model: str = ModelConfig.model
352
+ served_model_name: str | list[str] | None = ModelConfig.served_model_name
353
+ tokenizer: str | None = ModelConfig.tokenizer
354
+ hf_config_path: str | None = ModelConfig.hf_config_path
355
+ runner: RunnerOption = ModelConfig.runner
356
+ convert: ConvertOption = ModelConfig.convert
357
+ task: TaskOption | None = ModelConfig.task
358
+ skip_tokenizer_init: bool = ModelConfig.skip_tokenizer_init
359
+ enable_prompt_embeds: bool = ModelConfig.enable_prompt_embeds
360
+ tokenizer_mode: TokenizerMode = ModelConfig.tokenizer_mode
361
+ trust_remote_code: bool = ModelConfig.trust_remote_code
362
+ allowed_local_media_path: str = ModelConfig.allowed_local_media_path
363
+ allowed_media_domains: list[str] | None = ModelConfig.allowed_media_domains
364
+ download_dir: str | None = LoadConfig.download_dir
365
+ safetensors_load_strategy: str = LoadConfig.safetensors_load_strategy
366
+ load_format: str | LoadFormats = LoadConfig.load_format
367
+ config_format: str = ModelConfig.config_format
368
+ dtype: ModelDType = ModelConfig.dtype
369
+ kv_cache_dtype: CacheDType = CacheConfig.cache_dtype
370
+ seed: int | None = ModelConfig.seed
371
+ max_model_len: int | None = ModelConfig.max_model_len
372
+ cuda_graph_sizes: list[int] | None = CompilationConfig.cudagraph_capture_sizes
373
+ cudagraph_capture_sizes: list[int] | None = (
374
+ CompilationConfig.cudagraph_capture_sizes
375
+ )
376
+ max_cudagraph_capture_size: int | None = get_field(
377
+ CompilationConfig, "max_cudagraph_capture_size"
378
+ )
379
+ # Note: Specifying a custom executor backend by passing a class
380
+ # is intended for expert use only. The API may change without
381
+ # notice.
382
+ distributed_executor_backend: (
383
+ str | DistributedExecutorBackend | type[Executor] | None
384
+ ) = ParallelConfig.distributed_executor_backend
385
+ # number of P/D disaggregation (or other disaggregation) workers
386
+ pipeline_parallel_size: int = ParallelConfig.pipeline_parallel_size
387
+ master_addr: str = ParallelConfig.master_addr
388
+ master_port: int = ParallelConfig.master_port
389
+ nnodes: int = ParallelConfig.nnodes
390
+ node_rank: int = ParallelConfig.node_rank
391
+ tensor_parallel_size: int = ParallelConfig.tensor_parallel_size
392
+ decode_context_parallel_size: int = ParallelConfig.decode_context_parallel_size
393
+ dcp_kv_cache_interleave_size: int = ParallelConfig.dcp_kv_cache_interleave_size
394
+ data_parallel_size: int = ParallelConfig.data_parallel_size
395
+ data_parallel_rank: int | None = None
396
+ data_parallel_start_rank: int | None = None
397
+ data_parallel_size_local: int | None = None
398
+ data_parallel_address: str | None = None
399
+ data_parallel_rpc_port: int | None = None
400
+ data_parallel_hybrid_lb: bool = False
401
+ data_parallel_external_lb: bool = False
402
+ data_parallel_backend: str = ParallelConfig.data_parallel_backend
403
+ enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel
404
+ all2all_backend: str | None = ParallelConfig.all2all_backend
405
+ enable_dbo: bool = ParallelConfig.enable_dbo
406
+ dbo_decode_token_threshold: int = ParallelConfig.dbo_decode_token_threshold
407
+ dbo_prefill_token_threshold: int = ParallelConfig.dbo_prefill_token_threshold
408
+ disable_nccl_for_dp_synchronization: bool = (
409
+ ParallelConfig.disable_nccl_for_dp_synchronization
410
+ )
411
+ eplb_config: EPLBConfig = get_field(ParallelConfig, "eplb_config")
412
+ enable_eplb: bool = ParallelConfig.enable_eplb
413
+ expert_placement_strategy: ExpertPlacementStrategy = (
414
+ ParallelConfig.expert_placement_strategy
415
+ )
416
+ _api_process_count: int = ParallelConfig._api_process_count
417
+ _api_process_rank: int = ParallelConfig._api_process_rank
418
+ num_redundant_experts: int = EPLBConfig.num_redundant_experts
419
+ eplb_window_size: int = EPLBConfig.window_size
420
+ eplb_step_interval: int = EPLBConfig.step_interval
421
+ eplb_log_balancedness: bool = EPLBConfig.log_balancedness
422
+ max_parallel_loading_workers: int | None = (
423
+ ParallelConfig.max_parallel_loading_workers
424
+ )
425
+ block_size: BlockSize | None = CacheConfig.block_size
426
+ enable_prefix_caching: bool | None = CacheConfig.enable_prefix_caching
427
+ prefix_caching_hash_algo: PrefixCachingHashAlgo = (
428
+ CacheConfig.prefix_caching_hash_algo
429
+ )
430
+ disable_sliding_window: bool = ModelConfig.disable_sliding_window
431
+ disable_cascade_attn: bool = ModelConfig.disable_cascade_attn
432
+ swap_space: float = CacheConfig.swap_space
433
+ cpu_offload_gb: float = CacheConfig.cpu_offload_gb
434
+ gpu_memory_utilization: float = CacheConfig.gpu_memory_utilization
435
+ kv_cache_memory_bytes: int | None = CacheConfig.kv_cache_memory_bytes
436
+ max_num_batched_tokens: int | None = None
437
+ max_num_partial_prefills: int = SchedulerConfig.max_num_partial_prefills
438
+ max_long_partial_prefills: int = SchedulerConfig.max_long_partial_prefills
439
+ long_prefill_token_threshold: int = SchedulerConfig.long_prefill_token_threshold
440
+ max_num_seqs: int | None = None
441
+ max_logprobs: int = ModelConfig.max_logprobs
442
+ logprobs_mode: LogprobsMode = ModelConfig.logprobs_mode
443
+ disable_log_stats: bool = False
444
+ aggregate_engine_logging: bool = False
445
+ revision: str | None = ModelConfig.revision
446
+ code_revision: str | None = ModelConfig.code_revision
447
+ hf_token: bool | str | None = ModelConfig.hf_token
448
+ hf_overrides: HfOverrides = get_field(ModelConfig, "hf_overrides")
449
+ tokenizer_revision: str | None = ModelConfig.tokenizer_revision
450
+ quantization: QuantizationMethods | None = ModelConfig.quantization
451
+ enforce_eager: bool = ModelConfig.enforce_eager
452
+ disable_custom_all_reduce: bool = ParallelConfig.disable_custom_all_reduce
453
+ limit_mm_per_prompt: dict[str, int | dict[str, int]] = get_field(
454
+ MultiModalConfig, "limit_per_prompt"
455
+ )
456
+ enable_mm_embeds: bool = MultiModalConfig.enable_mm_embeds
457
+ interleave_mm_strings: bool = MultiModalConfig.interleave_mm_strings
458
+ media_io_kwargs: dict[str, dict[str, Any]] = get_field(
459
+ MultiModalConfig, "media_io_kwargs"
460
+ )
461
+ mm_processor_kwargs: dict[str, Any] | None = MultiModalConfig.mm_processor_kwargs
462
+ disable_mm_preprocessor_cache: bool = False # DEPRECATED
463
+ mm_processor_cache_gb: float = MultiModalConfig.mm_processor_cache_gb
464
+ mm_processor_cache_type: MMCacheType | None = (
465
+ MultiModalConfig.mm_processor_cache_type
466
+ )
467
+ mm_shm_cache_max_object_size_mb: int = (
468
+ MultiModalConfig.mm_shm_cache_max_object_size_mb
469
+ )
470
+ mm_encoder_tp_mode: MMEncoderTPMode = MultiModalConfig.mm_encoder_tp_mode
471
+ mm_encoder_attn_backend: AttentionBackendEnum | str | None = (
472
+ MultiModalConfig.mm_encoder_attn_backend
473
+ )
474
+ io_processor_plugin: str | None = None
475
+ skip_mm_profiling: bool = MultiModalConfig.skip_mm_profiling
476
+ video_pruning_rate: float = MultiModalConfig.video_pruning_rate
477
+ # LoRA fields
478
+ enable_lora: bool = False
479
+ max_loras: int = LoRAConfig.max_loras
480
+ max_lora_rank: int = LoRAConfig.max_lora_rank
481
+ default_mm_loras: dict[str, str] | None = LoRAConfig.default_mm_loras
482
+ fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
483
+ max_cpu_loras: int | None = LoRAConfig.max_cpu_loras
484
+ lora_dtype: str | torch.dtype | None = LoRAConfig.lora_dtype
485
+ lora_extra_vocab_size: int = LoRAConfig.lora_extra_vocab_size
486
+
487
+ ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
488
+ num_gpu_blocks_override: int | None = CacheConfig.num_gpu_blocks_override
489
+ num_lookahead_slots: int = SchedulerConfig.num_lookahead_slots
490
+ model_loader_extra_config: dict = get_field(LoadConfig, "model_loader_extra_config")
491
+ ignore_patterns: str | list[str] = get_field(LoadConfig, "ignore_patterns")
492
+
493
+ enable_chunked_prefill: bool | None = None
494
+ disable_chunked_mm_input: bool = SchedulerConfig.disable_chunked_mm_input
495
+
496
+ disable_hybrid_kv_cache_manager: bool = (
497
+ SchedulerConfig.disable_hybrid_kv_cache_manager
498
+ )
499
+
500
+ structured_outputs_config: StructuredOutputsConfig = get_field(
501
+ VllmConfig, "structured_outputs_config"
502
+ )
503
+ reasoning_parser: str = StructuredOutputsConfig.reasoning_parser
504
+ reasoning_parser_plugin: str | None = None
505
+ # Deprecated guided decoding fields
506
+ guided_decoding_backend: str | None = None
507
+ guided_decoding_disable_fallback: bool | None = None
508
+ guided_decoding_disable_any_whitespace: bool | None = None
509
+ guided_decoding_disable_additional_properties: bool | None = None
510
+
511
+ logits_processor_pattern: str | None = ModelConfig.logits_processor_pattern
512
+
513
+ speculative_config: dict[str, Any] | None = None
514
+
515
+ show_hidden_metrics_for_version: str | None = (
516
+ ObservabilityConfig.show_hidden_metrics_for_version
517
+ )
518
+ otlp_traces_endpoint: str | None = ObservabilityConfig.otlp_traces_endpoint
519
+ collect_detailed_traces: list[DetailedTraceModules] | None = (
520
+ ObservabilityConfig.collect_detailed_traces
521
+ )
522
+ scheduling_policy: SchedulerPolicy = SchedulerConfig.policy
523
+ scheduler_cls: str | type[object] | None = SchedulerConfig.scheduler_cls
524
+
525
+ pooler_config: PoolerConfig | None = ModelConfig.pooler_config
526
+ override_pooler_config: dict | PoolerConfig | None = (
527
+ ModelConfig.override_pooler_config
528
+ )
529
+ compilation_config: CompilationConfig = get_field(VllmConfig, "compilation_config")
530
+ worker_cls: str = ParallelConfig.worker_cls
531
+ worker_extension_cls: str = ParallelConfig.worker_extension_cls
532
+
533
+ kv_transfer_config: KVTransferConfig | None = None
534
+ kv_events_config: KVEventsConfig | None = None
535
+
536
+ ec_transfer_config: ECTransferConfig | None = None
537
+
538
+ generation_config: str = ModelConfig.generation_config
539
+ enable_sleep_mode: bool = ModelConfig.enable_sleep_mode
540
+ override_generation_config: dict[str, Any] = get_field(
541
+ ModelConfig, "override_generation_config"
542
+ )
543
+ model_impl: str = ModelConfig.model_impl
544
+ override_attention_dtype: str = ModelConfig.override_attention_dtype
545
+
546
+ calculate_kv_scales: bool = CacheConfig.calculate_kv_scales
547
+ mamba_cache_dtype: MambaDType = CacheConfig.mamba_cache_dtype
548
+ mamba_ssm_cache_dtype: MambaDType = CacheConfig.mamba_ssm_cache_dtype
549
+ mamba_block_size: int | None = get_field(CacheConfig, "mamba_block_size")
550
+
551
+ additional_config: dict[str, Any] = get_field(VllmConfig, "additional_config")
552
+
553
+ use_tqdm_on_load: bool = LoadConfig.use_tqdm_on_load
554
+ pt_load_map_location: str = LoadConfig.pt_load_map_location
555
+
556
+ # DEPRECATED
557
+ enable_multimodal_encoder_data_parallel: bool = False
558
+
559
+ logits_processors: list[str | type[LogitsProcessor]] | None = (
560
+ ModelConfig.logits_processors
561
+ )
562
+ """Custom logitproc types"""
563
+
564
+ async_scheduling: bool | None = SchedulerConfig.async_scheduling
565
+
566
+ stream_interval: int = SchedulerConfig.stream_interval
567
+
568
+ kv_sharing_fast_prefill: bool = CacheConfig.kv_sharing_fast_prefill
569
+
570
+ kv_offloading_size: float | None = CacheConfig.kv_offloading_size
571
+ kv_offloading_backend: KVOffloadingBackend | None = (
572
+ CacheConfig.kv_offloading_backend
573
+ )
574
+ tokens_only: bool = False
575
+
576
+ def __post_init__(self):
577
+ # support `EngineArgs(compilation_config={...})`
578
+ # without having to manually construct a
579
+ # CompilationConfig object
580
+ if isinstance(self.compilation_config, dict):
581
+ self.compilation_config = CompilationConfig(**self.compilation_config)
582
+ if isinstance(self.eplb_config, dict):
583
+ self.eplb_config = EPLBConfig(**self.eplb_config)
584
+ # Setup plugins
585
+ from vllm.plugins import load_general_plugins
586
+
587
+ load_general_plugins()
588
+ # when using HF offline, replace the model id with the local model path
589
+ if huggingface_hub.constants.HF_HUB_OFFLINE:
590
+ model_id = self.model
591
+ self.model = get_model_path(self.model, self.revision)
592
+ logger.info(
593
+ "HF_HUB_OFFLINE is True, replace model_id [%s] to model_path [%s]",
594
+ model_id,
595
+ self.model,
596
+ )
597
+
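As the comment in `__post_init__` indicates, nested configs may be passed as plain dicts and are coerced into their config objects. A hedged sketch, assuming an installed vLLM (where `EngineArgs` is re-exported at the package top level) and using `cudagraph_capture_sizes`, a `CompilationConfig` field referenced earlier in this file; the model id is illustrative only.

    from vllm import EngineArgs  # assumed top-level re-export in released wheels

    engine_args = EngineArgs(
        model="facebook/opt-125m",  # illustrative model id
        compilation_config={"cudagraph_capture_sizes": [1, 2, 4]},
    )
    print(type(engine_args.compilation_config).__name__)  # CompilationConfig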
598
+ @staticmethod
599
+ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
600
+ """Shared CLI arguments for vLLM engine."""
601
+
602
+ # Model arguments
603
+ model_kwargs = get_kwargs(ModelConfig)
604
+ model_group = parser.add_argument_group(
605
+ title="ModelConfig",
606
+ description=ModelConfig.__doc__,
607
+ )
608
+ if not ("serve" in sys.argv[1:] and "--help" in sys.argv[1:]):
609
+ model_group.add_argument("--model", **model_kwargs["model"])
610
+ model_group.add_argument("--runner", **model_kwargs["runner"])
611
+ model_group.add_argument("--convert", **model_kwargs["convert"])
612
+ model_group.add_argument("--task", **model_kwargs["task"], deprecated=True)
613
+ model_group.add_argument("--tokenizer", **model_kwargs["tokenizer"])
614
+ model_group.add_argument("--tokenizer-mode", **model_kwargs["tokenizer_mode"])
615
+ model_group.add_argument(
616
+ "--trust-remote-code", **model_kwargs["trust_remote_code"]
617
+ )
618
+ model_group.add_argument("--dtype", **model_kwargs["dtype"])
619
+ model_group.add_argument("--seed", **model_kwargs["seed"])
620
+ model_group.add_argument("--hf-config-path", **model_kwargs["hf_config_path"])
621
+ model_group.add_argument(
622
+ "--allowed-local-media-path", **model_kwargs["allowed_local_media_path"]
623
+ )
624
+ model_group.add_argument(
625
+ "--allowed-media-domains", **model_kwargs["allowed_media_domains"]
626
+ )
627
+ model_group.add_argument("--revision", **model_kwargs["revision"])
628
+ model_group.add_argument("--code-revision", **model_kwargs["code_revision"])
629
+ model_group.add_argument(
630
+ "--tokenizer-revision", **model_kwargs["tokenizer_revision"]
631
+ )
632
+ model_group.add_argument("--max-model-len", **model_kwargs["max_model_len"])
633
+ model_group.add_argument("--quantization", "-q", **model_kwargs["quantization"])
634
+ model_group.add_argument("--enforce-eager", **model_kwargs["enforce_eager"])
635
+ model_group.add_argument("--max-logprobs", **model_kwargs["max_logprobs"])
636
+ model_group.add_argument("--logprobs-mode", **model_kwargs["logprobs_mode"])
637
+ model_group.add_argument(
638
+ "--disable-sliding-window", **model_kwargs["disable_sliding_window"]
639
+ )
640
+ model_group.add_argument(
641
+ "--disable-cascade-attn", **model_kwargs["disable_cascade_attn"]
642
+ )
643
+ model_group.add_argument(
644
+ "--skip-tokenizer-init", **model_kwargs["skip_tokenizer_init"]
645
+ )
646
+ model_group.add_argument(
647
+ "--enable-prompt-embeds", **model_kwargs["enable_prompt_embeds"]
648
+ )
649
+ model_group.add_argument(
650
+ "--served-model-name", **model_kwargs["served_model_name"]
651
+ )
652
+ model_group.add_argument("--config-format", **model_kwargs["config_format"])
653
+ # This one is a special case because it can be a bool
654
+ # or str. TODO: Handle this in get_kwargs
655
+ model_group.add_argument(
656
+ "--hf-token",
657
+ type=str,
658
+ nargs="?",
659
+ const=True,
660
+ default=model_kwargs["hf_token"]["default"],
661
+ help=model_kwargs["hf_token"]["help"],
662
+ )
663
+ model_group.add_argument("--hf-overrides", **model_kwargs["hf_overrides"])
664
+ model_group.add_argument("--pooler-config", **model_kwargs["pooler_config"])
665
+ model_group.add_argument(
666
+ "--override-pooler-config",
667
+ **model_kwargs["override_pooler_config"],
668
+ deprecated=True,
669
+ )
670
+ model_group.add_argument(
671
+ "--logits-processor-pattern", **model_kwargs["logits_processor_pattern"]
672
+ )
673
+ model_group.add_argument(
674
+ "--generation-config", **model_kwargs["generation_config"]
675
+ )
676
+ model_group.add_argument(
677
+ "--override-generation-config", **model_kwargs["override_generation_config"]
678
+ )
679
+ model_group.add_argument(
680
+ "--enable-sleep-mode", **model_kwargs["enable_sleep_mode"]
681
+ )
682
+ model_group.add_argument("--model-impl", **model_kwargs["model_impl"])
683
+ model_group.add_argument(
684
+ "--override-attention-dtype", **model_kwargs["override_attention_dtype"]
685
+ )
686
+ model_group.add_argument(
687
+ "--logits-processors", **model_kwargs["logits_processors"]
688
+ )
689
+ model_group.add_argument(
690
+ "--io-processor-plugin", **model_kwargs["io_processor_plugin"]
691
+ )
692
+
693
+ # Model loading arguments
694
+ load_kwargs = get_kwargs(LoadConfig)
695
+ load_group = parser.add_argument_group(
696
+ title="LoadConfig",
697
+ description=LoadConfig.__doc__,
698
+ )
699
+ load_group.add_argument("--load-format", **load_kwargs["load_format"])
700
+ load_group.add_argument("--download-dir", **load_kwargs["download_dir"])
701
+ load_group.add_argument(
702
+ "--safetensors-load-strategy", **load_kwargs["safetensors_load_strategy"]
703
+ )
704
+ load_group.add_argument(
705
+ "--model-loader-extra-config", **load_kwargs["model_loader_extra_config"]
706
+ )
707
+ load_group.add_argument("--ignore-patterns", **load_kwargs["ignore_patterns"])
708
+ load_group.add_argument("--use-tqdm-on-load", **load_kwargs["use_tqdm_on_load"])
709
+ load_group.add_argument(
710
+ "--pt-load-map-location", **load_kwargs["pt_load_map_location"]
711
+ )
712
+
713
+ # Structured outputs arguments
714
+ structured_outputs_kwargs = get_kwargs(StructuredOutputsConfig)
715
+ structured_outputs_group = parser.add_argument_group(
716
+ title="StructuredOutputsConfig",
717
+ description=StructuredOutputsConfig.__doc__,
718
+ )
719
+ structured_outputs_group.add_argument(
720
+ "--reasoning-parser",
721
+ # Choices need to be validated after parsing to include plugins
722
+ **structured_outputs_kwargs["reasoning_parser"],
723
+ )
724
+ structured_outputs_group.add_argument(
725
+ "--reasoning-parser-plugin",
726
+ **structured_outputs_kwargs["reasoning_parser_plugin"],
727
+ )
728
+ # Deprecated guided decoding arguments
729
+ for arg, type in [
730
+ ("--guided-decoding-backend", str),
731
+ ("--guided-decoding-disable-fallback", bool),
732
+ ("--guided-decoding-disable-any-whitespace", bool),
733
+ ("--guided-decoding-disable-additional-properties", bool),
734
+ ]:
735
+ structured_outputs_group.add_argument(
736
+ arg,
737
+ type=type,
738
+ help=(f"[DEPRECATED] {arg} will be removed in v0.12.0."),
739
+ deprecated=True,
740
+ )
741
+
742
+ # Parallel arguments
743
+ parallel_kwargs = get_kwargs(ParallelConfig)
744
+ parallel_group = parser.add_argument_group(
745
+ title="ParallelConfig",
746
+ description=ParallelConfig.__doc__,
747
+ )
748
+ parallel_group.add_argument(
749
+ "--distributed-executor-backend",
750
+ **parallel_kwargs["distributed_executor_backend"],
751
+ )
752
+ parallel_group.add_argument(
753
+ "--pipeline-parallel-size",
754
+ "-pp",
755
+ **parallel_kwargs["pipeline_parallel_size"],
756
+ )
757
+ parallel_group.add_argument("--master-addr", **parallel_kwargs["master_addr"])
758
+ parallel_group.add_argument("--master-port", **parallel_kwargs["master_port"])
759
+ parallel_group.add_argument("--nnodes", "-n", **parallel_kwargs["nnodes"])
760
+ parallel_group.add_argument("--node-rank", "-r", **parallel_kwargs["node_rank"])
761
+ parallel_group.add_argument(
762
+ "--tensor-parallel-size", "-tp", **parallel_kwargs["tensor_parallel_size"]
763
+ )
764
+ parallel_group.add_argument(
765
+ "--decode-context-parallel-size",
766
+ "-dcp",
767
+ **parallel_kwargs["decode_context_parallel_size"],
768
+ )
769
+ parallel_group.add_argument(
770
+ "--dcp-kv-cache-interleave-size",
771
+ **parallel_kwargs["dcp_kv_cache_interleave_size"],
772
+ )
773
+ parallel_group.add_argument(
774
+ "--data-parallel-size", "-dp", **parallel_kwargs["data_parallel_size"]
775
+ )
776
+ parallel_group.add_argument(
777
+ "--data-parallel-rank",
778
+ "-dpn",
779
+ type=int,
780
+ help="Data parallel rank of this instance. "
781
+ "When set, enables external load balancer mode.",
782
+ )
783
+ parallel_group.add_argument(
784
+ "--data-parallel-start-rank",
785
+ "-dpr",
786
+ type=int,
787
+ help="Starting data parallel rank for secondary nodes.",
788
+ )
789
+ parallel_group.add_argument(
790
+ "--data-parallel-size-local",
791
+ "-dpl",
792
+ type=int,
793
+ help="Number of data parallel replicas to run on this node.",
794
+ )
795
+ parallel_group.add_argument(
796
+ "--data-parallel-address",
797
+ "-dpa",
798
+ type=str,
799
+ help="Address of data parallel cluster head-node.",
800
+ )
801
+ parallel_group.add_argument(
802
+ "--data-parallel-rpc-port",
803
+ "-dpp",
804
+ type=int,
805
+ help="Port for data parallel RPC communication.",
806
+ )
807
+ parallel_group.add_argument(
808
+ "--data-parallel-backend",
809
+ "-dpb",
810
+ type=str,
811
+ default="mp",
812
+ help='Backend for data parallel, either "mp" or "ray".',
813
+ )
814
+ parallel_group.add_argument(
815
+ "--data-parallel-hybrid-lb",
816
+ "-dph",
817
+ **parallel_kwargs["data_parallel_hybrid_lb"],
818
+ )
819
+ parallel_group.add_argument(
820
+ "--data-parallel-external-lb",
821
+ "-dpe",
822
+ **parallel_kwargs["data_parallel_external_lb"],
823
+ )
824
+ parallel_group.add_argument(
825
+ "--enable-expert-parallel", **parallel_kwargs["enable_expert_parallel"]
826
+ )
827
+ parallel_group.add_argument(
828
+ "--all2all-backend", **parallel_kwargs["all2all_backend"]
829
+ )
830
+ parallel_group.add_argument("--enable-dbo", **parallel_kwargs["enable_dbo"])
831
+ parallel_group.add_argument(
832
+ "--dbo-decode-token-threshold",
833
+ **parallel_kwargs["dbo_decode_token_threshold"],
834
+ )
835
+ parallel_group.add_argument(
836
+ "--dbo-prefill-token-threshold",
837
+ **parallel_kwargs["dbo_prefill_token_threshold"],
838
+ )
839
+ parallel_group.add_argument(
840
+ "--disable-nccl-for-dp-synchronization",
841
+ **parallel_kwargs["disable_nccl_for_dp_synchronization"],
842
+ )
843
+ parallel_group.add_argument("--enable-eplb", **parallel_kwargs["enable_eplb"])
844
+ parallel_group.add_argument("--eplb-config", **parallel_kwargs["eplb_config"])
845
+ parallel_group.add_argument(
846
+ "--expert-placement-strategy",
847
+ **parallel_kwargs["expert_placement_strategy"],
848
+ )
849
+ parallel_group.add_argument(
850
+ "--num-redundant-experts",
851
+ type=int,
852
+ help="[DEPRECATED] --num-redundant-experts will be removed in v0.12.0.",
853
+ deprecated=True,
854
+ )
855
+ parallel_group.add_argument(
856
+ "--eplb-window-size",
857
+ type=int,
858
+ help="[DEPRECATED] --eplb-window-size will be removed in v0.12.0.",
859
+ deprecated=True,
860
+ )
861
+ parallel_group.add_argument(
862
+ "--eplb-step-interval",
863
+ type=int,
864
+ help="[DEPRECATED] --eplb-step-interval will be removed in v0.12.0.",
865
+ deprecated=True,
866
+ )
867
+ parallel_group.add_argument(
868
+ "--eplb-log-balancedness",
869
+ action=argparse.BooleanOptionalAction,
870
+ help="[DEPRECATED] --eplb-log-balancedness will be removed in v0.12.0.",
871
+ deprecated=True,
872
+ )
873
+
874
+ parallel_group.add_argument(
875
+ "--max-parallel-loading-workers",
876
+ **parallel_kwargs["max_parallel_loading_workers"],
877
+ )
878
+ parallel_group.add_argument(
879
+ "--ray-workers-use-nsight", **parallel_kwargs["ray_workers_use_nsight"]
880
+ )
881
+ parallel_group.add_argument(
882
+ "--disable-custom-all-reduce",
883
+ **parallel_kwargs["disable_custom_all_reduce"],
884
+ )
885
+ parallel_group.add_argument("--worker-cls", **parallel_kwargs["worker_cls"])
886
+ parallel_group.add_argument(
887
+ "--worker-extension-cls", **parallel_kwargs["worker_extension_cls"]
888
+ )
889
+ parallel_group.add_argument(
890
+ "--enable-multimodal-encoder-data-parallel",
891
+ action="store_true",
892
+ deprecated=True,
893
+ )
894
+
895
+ # KV cache arguments
896
+ cache_kwargs = get_kwargs(CacheConfig)
897
+ cache_group = parser.add_argument_group(
898
+ title="CacheConfig",
899
+ description=CacheConfig.__doc__,
900
+ )
901
+ cache_group.add_argument("--block-size", **cache_kwargs["block_size"])
902
+ cache_group.add_argument(
903
+ "--gpu-memory-utilization", **cache_kwargs["gpu_memory_utilization"]
904
+ )
905
+ cache_group.add_argument(
906
+ "--kv-cache-memory-bytes", **cache_kwargs["kv_cache_memory_bytes"]
907
+ )
908
+ cache_group.add_argument("--swap-space", **cache_kwargs["swap_space"])
909
+ cache_group.add_argument("--kv-cache-dtype", **cache_kwargs["cache_dtype"])
910
+ cache_group.add_argument(
911
+ "--num-gpu-blocks-override", **cache_kwargs["num_gpu_blocks_override"]
912
+ )
913
+ cache_group.add_argument(
914
+ "--enable-prefix-caching", **cache_kwargs["enable_prefix_caching"]
915
+ )
916
+ cache_group.add_argument(
917
+ "--prefix-caching-hash-algo", **cache_kwargs["prefix_caching_hash_algo"]
918
+ )
919
+ cache_group.add_argument("--cpu-offload-gb", **cache_kwargs["cpu_offload_gb"])
920
+ cache_group.add_argument(
921
+ "--calculate-kv-scales", **cache_kwargs["calculate_kv_scales"]
922
+ )
923
+ cache_group.add_argument(
924
+ "--kv-sharing-fast-prefill", **cache_kwargs["kv_sharing_fast_prefill"]
925
+ )
926
+ cache_group.add_argument(
927
+ "--mamba-cache-dtype", **cache_kwargs["mamba_cache_dtype"]
928
+ )
929
+ cache_group.add_argument(
930
+ "--mamba-ssm-cache-dtype", **cache_kwargs["mamba_ssm_cache_dtype"]
931
+ )
932
+ cache_group.add_argument(
933
+ "--mamba-block-size", **cache_kwargs["mamba_block_size"]
934
+ )
935
+ cache_group.add_argument(
936
+ "--kv-offloading-size", **cache_kwargs["kv_offloading_size"]
937
+ )
938
+ cache_group.add_argument(
939
+ "--kv-offloading-backend", **cache_kwargs["kv_offloading_backend"]
940
+ )
941
+
942
+ # Multimodal related configs
943
+ multimodal_kwargs = get_kwargs(MultiModalConfig)
944
+ multimodal_group = parser.add_argument_group(
945
+ title="MultiModalConfig",
946
+ description=MultiModalConfig.__doc__,
947
+ )
948
+ multimodal_group.add_argument(
949
+ "--limit-mm-per-prompt", **multimodal_kwargs["limit_per_prompt"]
950
+ )
951
+ multimodal_group.add_argument(
952
+ "--enable-mm-embeds", **multimodal_kwargs["enable_mm_embeds"]
953
+ )
954
+ multimodal_group.add_argument(
955
+ "--media-io-kwargs", **multimodal_kwargs["media_io_kwargs"]
956
+ )
957
+ multimodal_group.add_argument(
958
+ "--mm-processor-kwargs", **multimodal_kwargs["mm_processor_kwargs"]
959
+ )
960
+ multimodal_group.add_argument(
961
+ "--mm-processor-cache-gb", **multimodal_kwargs["mm_processor_cache_gb"]
962
+ )
963
+ multimodal_group.add_argument(
964
+ "--disable-mm-preprocessor-cache", action="store_true", deprecated=True
965
+ )
966
+ multimodal_group.add_argument(
967
+ "--mm-processor-cache-type", **multimodal_kwargs["mm_processor_cache_type"]
968
+ )
969
+ multimodal_group.add_argument(
970
+ "--mm-shm-cache-max-object-size-mb",
971
+ **multimodal_kwargs["mm_shm_cache_max_object_size_mb"],
972
+ )
973
+ multimodal_group.add_argument(
974
+ "--mm-encoder-tp-mode", **multimodal_kwargs["mm_encoder_tp_mode"]
975
+ )
976
+ multimodal_group.add_argument(
977
+ "--mm-encoder-attn-backend",
978
+ **multimodal_kwargs["mm_encoder_attn_backend"],
979
+ )
980
+ multimodal_group.add_argument(
981
+ "--interleave-mm-strings", **multimodal_kwargs["interleave_mm_strings"]
982
+ )
983
+ multimodal_group.add_argument(
984
+ "--skip-mm-profiling", **multimodal_kwargs["skip_mm_profiling"]
985
+ )
986
+
987
+ multimodal_group.add_argument(
988
+ "--video-pruning-rate", **multimodal_kwargs["video_pruning_rate"]
989
+ )
990
+
991
+ # LoRA related configs
992
+ lora_kwargs = get_kwargs(LoRAConfig)
993
+ lora_group = parser.add_argument_group(
994
+ title="LoRAConfig",
995
+ description=LoRAConfig.__doc__,
996
+ )
997
+ lora_group.add_argument(
998
+ "--enable-lora",
999
+ action=argparse.BooleanOptionalAction,
1000
+ help="If True, enable handling of LoRA adapters.",
1001
+ )
1002
+ lora_group.add_argument("--max-loras", **lora_kwargs["max_loras"])
1003
+ lora_group.add_argument("--max-lora-rank", **lora_kwargs["max_lora_rank"])
1004
+ lora_group.add_argument(
1005
+ "--lora-extra-vocab-size", **lora_kwargs["lora_extra_vocab_size"]
1006
+ )
1007
+ lora_group.add_argument(
1008
+ "--lora-dtype",
1009
+ **lora_kwargs["lora_dtype"],
1010
+ )
1011
+ lora_group.add_argument("--max-cpu-loras", **lora_kwargs["max_cpu_loras"])
1012
+ lora_group.add_argument(
1013
+ "--fully-sharded-loras", **lora_kwargs["fully_sharded_loras"]
1014
+ )
1015
+ lora_group.add_argument("--default-mm-loras", **lora_kwargs["default_mm_loras"])
1016
+
1017
+ # Observability arguments
1018
+ observability_kwargs = get_kwargs(ObservabilityConfig)
1019
+ observability_group = parser.add_argument_group(
1020
+ title="ObservabilityConfig",
1021
+ description=ObservabilityConfig.__doc__,
1022
+ )
1023
+ observability_group.add_argument(
1024
+ "--show-hidden-metrics-for-version",
1025
+ **observability_kwargs["show_hidden_metrics_for_version"],
1026
+ )
1027
+ observability_group.add_argument(
1028
+ "--otlp-traces-endpoint", **observability_kwargs["otlp_traces_endpoint"]
1029
+ )
1030
+ # TODO: generalise this special case
1031
+ choices = observability_kwargs["collect_detailed_traces"]["choices"]
1032
+ metavar = f"{{{','.join(choices)}}}"
1033
+ observability_kwargs["collect_detailed_traces"]["metavar"] = metavar
1034
+ observability_kwargs["collect_detailed_traces"]["choices"] += [
1035
+ ",".join(p) for p in permutations(get_args(DetailedTraceModules), r=2)
1036
+ ]
1037
+ observability_group.add_argument(
1038
+ "--collect-detailed-traces",
1039
+ **observability_kwargs["collect_detailed_traces"],
1040
+ )
1041
+
1042
+ # Scheduler arguments
1043
+ scheduler_kwargs = get_kwargs(SchedulerConfig)
1044
+ scheduler_group = parser.add_argument_group(
1045
+ title="SchedulerConfig",
1046
+ description=SchedulerConfig.__doc__,
1047
+ )
1048
+ scheduler_group.add_argument(
1049
+ "--max-num-batched-tokens",
1050
+ **{
1051
+ **scheduler_kwargs["max_num_batched_tokens"],
1052
+ "default": None,
1053
+ },
1054
+ )
1055
+ scheduler_group.add_argument(
1056
+ "--max-num-seqs",
1057
+ **{
1058
+ **scheduler_kwargs["max_num_seqs"],
1059
+ "default": None,
1060
+ },
1061
+ )
1062
+ scheduler_group.add_argument(
1063
+ "--max-num-partial-prefills", **scheduler_kwargs["max_num_partial_prefills"]
1064
+ )
1065
+ scheduler_group.add_argument(
1066
+ "--max-long-partial-prefills",
1067
+ **scheduler_kwargs["max_long_partial_prefills"],
1068
+ )
1069
+ scheduler_group.add_argument(
1070
+ "--long-prefill-token-threshold",
1071
+ **scheduler_kwargs["long_prefill_token_threshold"],
1072
+ )
1073
+ scheduler_group.add_argument(
1074
+ "--num-lookahead-slots", **scheduler_kwargs["num_lookahead_slots"]
1075
+ )
1076
+ # multi-step scheduling has been removed; corresponding arguments
1077
+ # are no longer supported.
1078
+ scheduler_group.add_argument(
1079
+ "--scheduling-policy", **scheduler_kwargs["policy"]
1080
+ )
1081
+ scheduler_group.add_argument(
1082
+ "--enable-chunked-prefill",
1083
+ **{
1084
+ **scheduler_kwargs["enable_chunked_prefill"],
1085
+ "default": None,
1086
+ },
1087
+ )
1088
+ scheduler_group.add_argument(
1089
+ "--disable-chunked-mm-input", **scheduler_kwargs["disable_chunked_mm_input"]
1090
+ )
1091
+ scheduler_group.add_argument(
1092
+ "--scheduler-cls", **scheduler_kwargs["scheduler_cls"]
1093
+ )
1094
+ scheduler_group.add_argument(
1095
+ "--disable-hybrid-kv-cache-manager",
1096
+ **scheduler_kwargs["disable_hybrid_kv_cache_manager"],
1097
+ )
1098
+ scheduler_group.add_argument(
1099
+ "--async-scheduling", **scheduler_kwargs["async_scheduling"]
1100
+ )
1101
+ scheduler_group.add_argument(
1102
+ "--stream-interval", **scheduler_kwargs["stream_interval"]
1103
+ )
1104
+
1105
+ # Compilation arguments
1106
+ compilation_kwargs = get_kwargs(CompilationConfig)
1107
+ compilation_group = parser.add_argument_group(
1108
+ title="CompilationConfig",
1109
+ description=CompilationConfig.__doc__,
1110
+ )
1111
+ compilation_group.add_argument(
1112
+ "--cudagraph-capture-sizes", **compilation_kwargs["cudagraph_capture_sizes"]
1113
+ )
1114
+ compilation_kwargs["cudagraph_capture_sizes"]["help"] = (
1115
+ "--cuda-graph-sizes is deprecated and will be removed in v0.13.0 or v1.0.0,"
1116
+ " whichever is soonest. Please use --cudagraph-capture-sizes instead."
1117
+ )
1118
+ compilation_group.add_argument(
1119
+ "--cuda-graph-sizes",
1120
+ **compilation_kwargs["cudagraph_capture_sizes"],
1121
+ deprecated=True,
1122
+ )
1123
+ compilation_group.add_argument(
1124
+ "--max-cudagraph-capture-size",
1125
+ **compilation_kwargs["max_cudagraph_capture_size"],
1126
+ )
1127
+
1128
+ # vLLM arguments
1129
+ vllm_kwargs = get_kwargs(VllmConfig)
1130
+ vllm_group = parser.add_argument_group(
1131
+ title="VllmConfig",
1132
+ description=VllmConfig.__doc__,
1133
+ )
1134
+ # We construct SpeculativeConfig using fields from other configs in
1135
+ # create_engine_config. So here we only parse the value as JSON to
1136
+ # delay the Pydantic validation that comes with SpeculativeConfig.
1137
+ vllm_kwargs["speculative_config"]["type"] = optional_type(json.loads)
1138
+ vllm_group.add_argument(
1139
+ "--speculative-config", **vllm_kwargs["speculative_config"]
1140
+ )
1141
+ vllm_group.add_argument(
1142
+ "--kv-transfer-config", **vllm_kwargs["kv_transfer_config"]
1143
+ )
1144
+ vllm_group.add_argument("--kv-events-config", **vllm_kwargs["kv_events_config"])
1145
+ vllm_group.add_argument(
1146
+ "--ec-transfer-config", **vllm_kwargs["ec_transfer_config"]
1147
+ )
1148
+ vllm_group.add_argument(
1149
+ "--compilation-config", "-O", **vllm_kwargs["compilation_config"]
1150
+ )
1151
+ vllm_group.add_argument(
1152
+ "--additional-config", **vllm_kwargs["additional_config"]
1153
+ )
1154
+ vllm_group.add_argument(
1155
+ "--structured-outputs-config", **vllm_kwargs["structured_outputs_config"]
1156
+ )
1157
+
1158
+ # Other arguments
1159
+ parser.add_argument(
1160
+ "--disable-log-stats",
1161
+ action="store_true",
1162
+ help="Disable logging statistics.",
1163
+ )
1164
+
1165
+ parser.add_argument(
1166
+ "--aggregate-engine-logging",
1167
+ action="store_true",
1168
+ help="Log aggregate rather than per-engine statistics "
1169
+ "when using data parallelism.",
1170
+ )
1171
+ return parser
1172
+
1173
+ @classmethod
1174
+ def from_cli_args(cls, args: argparse.Namespace):
1175
+ # Get the list of attributes of this dataclass.
1176
+ attrs = [attr.name for attr in dataclasses.fields(cls)]
1177
+ # Set the attributes from the parsed arguments.
1178
+ engine_args = cls(
1179
+ **{attr: getattr(args, attr) for attr in attrs if hasattr(args, attr)}
1180
+ )
1181
+ return engine_args
1182
+
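A rough usage sketch of how add_cli_args and from_cli_args above are meant to be paired; the flag names and the no-argument FlexibleArgumentParser construction are assumptions for illustration, not the canonical entry point.

    parser = EngineArgs.add_cli_args(FlexibleArgumentParser())
    args = parser.parse_args(["--model", "some/model", "--max-num-seqs", "8"])
    engine_args = EngineArgs.from_cli_args(args)
    vllm_config = engine_args.create_engine_config()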
1183
+ def create_model_config(self) -> ModelConfig:
1184
+ # gguf file needs a specific model loader and doesn't use hf_repo
1185
+ if check_gguf_file(self.model):
1186
+ self.quantization = self.load_format = "gguf"
1187
+
1188
+ if self.disable_mm_preprocessor_cache:
1189
+ logger.warning(
1190
+ "`--disable-mm-preprocessor-cache` is deprecated "
1191
+ "and will be removed in v0.13. "
1192
+ "Please use `--mm-processor-cache-gb 0` instead.",
1193
+ )
1194
+
1195
+ self.mm_processor_cache_gb = 0
1196
+ elif envs.VLLM_MM_INPUT_CACHE_GIB != 4:
1197
+ logger.warning(
1198
+ "VLLM_MM_INPUT_CACHE_GIB` is deprecated "
1199
+ "and will be removed in v0.13. "
1200
+ "Please use `--mm-processor-cache-gb %d` instead.",
1201
+ envs.VLLM_MM_INPUT_CACHE_GIB,
1202
+ )
1203
+
1204
+ self.mm_processor_cache_gb = envs.VLLM_MM_INPUT_CACHE_GIB
1205
+
1206
+ if self.enable_multimodal_encoder_data_parallel:
1207
+ logger.warning(
1208
+ "--enable-multimodal-encoder-data-parallel` is deprecated "
1209
+ "and will be removed in v0.13. "
1210
+ "Please use `--mm-encoder-tp-mode data` instead."
1211
+ )
1212
+
1213
+ self.mm_encoder_tp_mode = "data"
1214
+
1215
+ return ModelConfig(
1216
+ model=self.model,
1217
+ hf_config_path=self.hf_config_path,
1218
+ runner=self.runner,
1219
+ convert=self.convert,
1220
+ task=self.task,
1221
+ tokenizer=self.tokenizer,
1222
+ tokenizer_mode=self.tokenizer_mode,
1223
+ trust_remote_code=self.trust_remote_code,
1224
+ allowed_local_media_path=self.allowed_local_media_path,
1225
+ allowed_media_domains=self.allowed_media_domains,
1226
+ dtype=self.dtype,
1227
+ seed=self.seed,
1228
+ revision=self.revision,
1229
+ code_revision=self.code_revision,
1230
+ hf_token=self.hf_token,
1231
+ hf_overrides=self.hf_overrides,
1232
+ tokenizer_revision=self.tokenizer_revision,
1233
+ max_model_len=self.max_model_len,
1234
+ quantization=self.quantization,
1235
+ enforce_eager=self.enforce_eager,
1236
+ max_logprobs=self.max_logprobs,
1237
+ logprobs_mode=self.logprobs_mode,
1238
+ disable_sliding_window=self.disable_sliding_window,
1239
+ disable_cascade_attn=self.disable_cascade_attn,
1240
+ skip_tokenizer_init=self.skip_tokenizer_init,
1241
+ enable_prompt_embeds=self.enable_prompt_embeds,
1242
+ served_model_name=self.served_model_name,
1243
+ limit_mm_per_prompt=self.limit_mm_per_prompt,
1244
+ enable_mm_embeds=self.enable_mm_embeds,
1245
+ interleave_mm_strings=self.interleave_mm_strings,
1246
+ media_io_kwargs=self.media_io_kwargs,
1247
+ skip_mm_profiling=self.skip_mm_profiling,
1248
+ config_format=self.config_format,
1249
+ mm_processor_kwargs=self.mm_processor_kwargs,
1250
+ mm_processor_cache_gb=self.mm_processor_cache_gb,
1251
+ mm_processor_cache_type=self.mm_processor_cache_type,
1252
+ mm_shm_cache_max_object_size_mb=self.mm_shm_cache_max_object_size_mb,
1253
+ mm_encoder_tp_mode=self.mm_encoder_tp_mode,
1254
+ mm_encoder_attn_backend=self.mm_encoder_attn_backend,
1255
+ pooler_config=self.pooler_config,
1256
+ override_pooler_config=self.override_pooler_config,
1257
+ logits_processor_pattern=self.logits_processor_pattern,
1258
+ generation_config=self.generation_config,
1259
+ override_generation_config=self.override_generation_config,
1260
+ enable_sleep_mode=self.enable_sleep_mode,
1261
+ model_impl=self.model_impl,
1262
+ override_attention_dtype=self.override_attention_dtype,
1263
+ logits_processors=self.logits_processors,
1264
+ video_pruning_rate=self.video_pruning_rate,
1265
+ io_processor_plugin=self.io_processor_plugin,
1266
+ )
1267
+
1268
+ def validate_tensorizer_args(self):
1269
+ from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
1270
+
1271
+ for key in self.model_loader_extra_config:
1272
+ if key in TensorizerConfig._fields:
1273
+ self.model_loader_extra_config["tensorizer_config"][key] = (
1274
+ self.model_loader_extra_config[key]
1275
+ )
1276
+
1277
+ def create_load_config(self) -> LoadConfig:
1278
+ if self.quantization == "bitsandbytes":
1279
+ self.load_format = "bitsandbytes"
1280
+
1281
+ if self.load_format == "tensorizer":
1282
+ if hasattr(self.model_loader_extra_config, "to_serializable"):
1283
+ self.model_loader_extra_config = (
1284
+ self.model_loader_extra_config.to_serializable()
1285
+ )
1286
+ self.model_loader_extra_config["tensorizer_config"] = {}
1287
+ self.model_loader_extra_config["tensorizer_config"]["tensorizer_dir"] = (
1288
+ self.model
1289
+ )
1290
+ self.validate_tensorizer_args()
1291
+
1292
+ return LoadConfig(
1293
+ load_format=self.load_format,
1294
+ download_dir=self.download_dir,
1295
+ safetensors_load_strategy=self.safetensors_load_strategy,
1296
+ device="cpu" if is_online_quantization(self.quantization) else None,
1297
+ model_loader_extra_config=self.model_loader_extra_config,
1298
+ ignore_patterns=self.ignore_patterns,
1299
+ use_tqdm_on_load=self.use_tqdm_on_load,
1300
+ pt_load_map_location=self.pt_load_map_location,
1301
+ )
1302
+
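A hedged illustration of the tensorizer branch in create_load_config and validate_tensorizer_args above; the extra key below is hypothetical and is only forwarded if it matches a real TensorizerConfig field.

    engine_args = EngineArgs(
        model="/path/to/serialized-model",
        load_format="tensorizer",
        # Hypothetical key: validate_tensorizer_args copies it into
        # ["tensorizer_config"] only if TensorizerConfig defines that field.
        model_loader_extra_config={"some_tensorizer_field": True},
    )
    load_config = engine_args.create_load_config()
    # load_config.model_loader_extra_config["tensorizer_config"]["tensorizer_dir"]
    # now points at engine_args.model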
1303
+ def create_speculative_config(
1304
+ self,
1305
+ target_model_config: ModelConfig,
1306
+ target_parallel_config: ParallelConfig,
1307
+ ) -> SpeculativeConfig | None:
1308
+ """Initializes and returns a SpeculativeConfig object based on
1309
+ `speculative_config`.
1310
+
1311
+ This function utilizes `speculative_config` to create a
1312
+ SpeculativeConfig object. The `speculative_config` can either be
1313
+ provided as a JSON string input via CLI arguments or directly as a
1314
+ dictionary from the engine.
1315
+ """
1316
+ if self.speculative_config is None:
1317
+ return None
1318
+
1319
+ # Note(Shangming): These parameters are not obtained from the cli arg
1320
+ # '--speculative-config' and must be passed in when creating the engine
1321
+ # config.
1322
+ self.speculative_config.update(
1323
+ {
1324
+ "target_model_config": target_model_config,
1325
+ "target_parallel_config": target_parallel_config,
1326
+ }
1327
+ )
1328
+ return SpeculativeConfig(**self.speculative_config)
1329
+
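To make the dict/JSON duality described in the docstring concrete, a small sketch; "ngram" and "num_speculative_tokens" appear elsewhere in this file, while the model name is a placeholder.

    # From Python, speculative_config can be passed directly as a dict:
    engine_args = EngineArgs(
        model="some/model",
        speculative_config={"method": "ngram", "num_speculative_tokens": 3},
    )

    # From the CLI the same configuration arrives as a JSON string:
    #   --speculative-config '{"method": "ngram", "num_speculative_tokens": 3}'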
1330
+ def create_engine_config(
1331
+ self,
1332
+ usage_context: UsageContext | None = None,
1333
+ headless: bool = False,
1334
+ ) -> VllmConfig:
1335
+ """
1336
+ Create the VllmConfig.
1337
+
1338
+ NOTE: If VllmConfig is incompatible, we raise an error.
1339
+ """
1340
+ current_platform.pre_register_and_update()
1341
+
1342
+ device_config = DeviceConfig(device=cast(Device, current_platform.device_type))
1343
+
1344
+ # Check if the model is a speculator and override model/tokenizer/config
1345
+ # BEFORE creating ModelConfig, so the config is created with the target model
1346
+ # Skip speculator detection for cloud storage models (e.g. S3, GCS) since
1347
+ # HuggingFace cannot load configs directly from S3 URLs. S3 models can still
1348
+ # use speculators with explicit --speculative-config.
1349
+ if not is_cloud_storage(self.model):
1350
+ (self.model, self.tokenizer, self.speculative_config) = (
1351
+ maybe_override_with_speculators(
1352
+ model=self.model,
1353
+ tokenizer=self.tokenizer,
1354
+ revision=self.revision,
1355
+ trust_remote_code=self.trust_remote_code,
1356
+ vllm_speculative_config=self.speculative_config,
1357
+ )
1358
+ )
1359
+
1360
+ model_config = self.create_model_config()
1361
+ self.model = model_config.model
1362
+ self.tokenizer = model_config.tokenizer
1363
+
1364
+ self._check_feature_supported(model_config)
1365
+
1366
+ # Set default arguments for V1 Engine.
1367
+ self._set_default_args(usage_context, model_config)
1368
+ # Disable chunked prefill and prefix caching for:
1369
+ # POWER (ppc64le)/ARM/s390x/RISCV CPUs in V1
1370
+ if current_platform.is_cpu() and current_platform.get_cpu_architecture() in (
1371
+ CpuArchEnum.POWERPC,
1372
+ CpuArchEnum.S390X,
1373
+ CpuArchEnum.ARM,
1374
+ CpuArchEnum.RISCV,
1375
+ ):
1376
+ logger.info(
1377
+ "Chunked prefill is not supported for ARM and POWER, "
1378
+ "S390X and RISC-V CPUs; "
1379
+ "disabling it for V1 backend."
1380
+ )
1381
+ self.enable_chunked_prefill = False
1382
+ logger.info(
1383
+ "Prefix caching is not supported for ARM and POWER, "
1384
+ "S390X and RISC-V CPUs; "
1385
+ "disabling it for V1 backend."
1386
+ )
1387
+ self.enable_prefix_caching = False
1388
+
1389
+ assert self.enable_chunked_prefill is not None
1390
+
1391
+ sliding_window: int | None = None
1392
+ if not is_interleaved(model_config.hf_text_config):
1393
+ # Only set CacheConfig.sliding_window if the model is all sliding
1394
+ # window. Otherwise CacheConfig.sliding_window will override the
1395
+ # global layers in interleaved sliding window models.
1396
+ sliding_window = model_config.get_sliding_window()
1397
+
1398
+ # Note(hc): In the current implementation of decode context
1399
+ # parallel (DCP), tp_size must be divisible by dcp_size: DCP does not
1400
+ # change the world size, it simply reuses the GPUs of the TP group and
1401
+ # splits one TP group into tp_size // dcp_size DCP groups (e.g. tp_size=8
1402
+ # with dcp_size=2 yields 4 DCP groups of 2 GPUs each).
1403
+ assert self.tensor_parallel_size % self.decode_context_parallel_size == 0, (
1404
+ f"tp_size={self.tensor_parallel_size} must be divisible by"
1405
+ f"dcp_size={self.decode_context_parallel_size}."
1406
+ )
1407
+
1408
+ cache_config = CacheConfig(
1409
+ block_size=self.block_size,
1410
+ gpu_memory_utilization=self.gpu_memory_utilization,
1411
+ kv_cache_memory_bytes=self.kv_cache_memory_bytes,
1412
+ swap_space=self.swap_space,
1413
+ cache_dtype=self.kv_cache_dtype,
1414
+ is_attention_free=model_config.is_attention_free,
1415
+ num_gpu_blocks_override=self.num_gpu_blocks_override,
1416
+ sliding_window=sliding_window,
1417
+ enable_prefix_caching=self.enable_prefix_caching,
1418
+ prefix_caching_hash_algo=self.prefix_caching_hash_algo,
1419
+ cpu_offload_gb=self.cpu_offload_gb,
1420
+ calculate_kv_scales=self.calculate_kv_scales,
1421
+ kv_sharing_fast_prefill=self.kv_sharing_fast_prefill,
1422
+ mamba_cache_dtype=self.mamba_cache_dtype,
1423
+ mamba_ssm_cache_dtype=self.mamba_ssm_cache_dtype,
1424
+ mamba_block_size=self.mamba_block_size,
1425
+ kv_offloading_size=self.kv_offloading_size,
1426
+ kv_offloading_backend=self.kv_offloading_backend,
1427
+ )
1428
+
1429
+ ray_runtime_env = None
1430
+ if is_ray_initialized():
1431
+ # Ray Serve LLM calls `create_engine_config` in the context
1432
+ # of a Ray task, therefore we check is_ray_initialized()
1433
+ # as opposed to is_in_ray_actor().
1434
+ import ray
1435
+
1436
+ ray_runtime_env = ray.get_runtime_context().runtime_env
1437
+ # Avoid logging sensitive environment variables
1438
+ sanitized_env = ray_runtime_env.to_dict() if ray_runtime_env else {}
1439
+ if "env_vars" in sanitized_env:
1440
+ sanitized_env["env_vars"] = {
1441
+ k: "***" for k in sanitized_env["env_vars"]
1442
+ }
1443
+ logger.info("Using ray runtime env (env vars redacted): %s", sanitized_env)
1444
+
1445
+ # Get the current placement group if Ray is initialized and
1446
+ # we are in a Ray actor. If so, then the placement group will be
1447
+ # passed to spawned processes.
1448
+ placement_group = None
1449
+ if is_in_ray_actor():
1450
+ import ray
1451
+
1452
+ # This call initializes Ray automatically if it is not initialized,
1453
+ # but we should not do this here.
1454
+ placement_group = ray.util.get_current_placement_group()
1455
+
1456
+ assert not headless or not self.data_parallel_hybrid_lb, (
1457
+ "data_parallel_hybrid_lb is not applicable in headless mode"
1458
+ )
1459
+ assert not (self.data_parallel_hybrid_lb and self.data_parallel_external_lb), (
1460
+ "data_parallel_hybrid_lb and data_parallel_external_lb cannot both be True."
1461
+ )
1462
+ assert self.data_parallel_backend == "mp" or self.nnodes == 1, (
1463
+ "nnodes > 1 is only supported with data_parallel_backend=mp"
1464
+ )
1465
+ inferred_data_parallel_rank = 0
1466
+ if self.nnodes > 1:
1467
+ world_size = (
1468
+ self.data_parallel_size
1469
+ * self.pipeline_parallel_size
1470
+ * self.tensor_parallel_size
1471
+ )
1472
+ world_size_within_dp = (
1473
+ self.pipeline_parallel_size * self.tensor_parallel_size
1474
+ )
1475
+ local_world_size = world_size // self.nnodes
1476
+ assert world_size % self.nnodes == 0, (
1477
+ f"world_size={world_size} must be divisible by nnodes={self.nnodes}."
1478
+ )
1479
+ assert self.node_rank < self.nnodes, (
1480
+ f"node_rank={self.node_rank} must be less than nnodes={self.nnodes}."
1481
+ )
1482
+ inferred_data_parallel_rank = (
1483
+ self.node_rank * local_world_size
1484
+ ) // world_size_within_dp
1485
+ if self.data_parallel_size > 1 and self.data_parallel_external_lb:
1486
+ self.data_parallel_rank = inferred_data_parallel_rank
1487
+ logger.info(
1488
+ "Inferred data_parallel_rank %d from node_rank %d for external lb",
1489
+ self.data_parallel_rank,
1490
+ self.node_rank,
1491
+ )
1492
+ elif self.data_parallel_size_local is None:
1493
+ # Infer data parallel size local for internal dplb:
1494
+ self.data_parallel_size_local = max(
1495
+ local_world_size // world_size_within_dp, 1
1496
+ )
1497
+ data_parallel_external_lb = (
1498
+ self.data_parallel_external_lb or self.data_parallel_rank is not None
1499
+ )
1500
+ # Pure external LB: local DP size is fixed to 1.
1501
+ if data_parallel_external_lb:
1502
+ assert self.data_parallel_rank is not None, (
1503
+ "data_parallel_rank or node_rank must be spefified if "
1504
+ "data_parallel_external_lb is enable."
1505
+ )
1506
+ assert self.data_parallel_size_local in (1, None), (
1507
+ "data_parallel_size_local must be 1 or None when data_parallel_rank "
1508
+ "is set"
1509
+ )
1510
+ data_parallel_size_local = 1
1511
+ # Use full external lb if we have local_size of 1.
1512
+ self.data_parallel_hybrid_lb = False
1513
+ elif self.data_parallel_size_local is not None:
1514
+ data_parallel_size_local = self.data_parallel_size_local
1515
+
1516
+ if self.data_parallel_start_rank and not headless:
1517
+ # Infer hybrid LB mode.
1518
+ self.data_parallel_hybrid_lb = True
1519
+
1520
+ if self.data_parallel_hybrid_lb and data_parallel_size_local == 1:
1521
+ # Use full external lb if we have local_size of 1.
1522
+ logger.warning(
1523
+ "data_parallel_hybrid_lb is not eligible when "
1524
+ "data_parallel_size_local = 1, autoswitch to "
1525
+ "data_parallel_external_lb."
1526
+ )
1527
+ data_parallel_external_lb = True
1528
+ self.data_parallel_hybrid_lb = False
1529
+
1530
+ if data_parallel_size_local == self.data_parallel_size:
1531
+ # Disable hybrid LB mode if set for a single node
1532
+ self.data_parallel_hybrid_lb = False
1533
+
1534
+ self.data_parallel_rank = (
1535
+ self.data_parallel_start_rank or inferred_data_parallel_rank
1536
+ )
1537
+ if self.nnodes > 1:
1538
+ logger.info(
1539
+ "Inferred data_parallel_rank %d from node_rank %d",
1540
+ self.data_parallel_rank,
1541
+ self.node_rank,
1542
+ )
1543
+ else:
1544
+ assert not self.data_parallel_hybrid_lb, (
1545
+ "data_parallel_size_local must be set to use data_parallel_hybrid_lb."
1546
+ )
1547
+
1548
+ if self.data_parallel_backend == "ray" and (
1549
+ envs.VLLM_RAY_DP_PACK_STRATEGY == "span"
1550
+ ):
1551
+ # Data parallel size defaults to 1 if DP ranks are spanning
1552
+ # multiple nodes
1553
+ data_parallel_size_local = 1
1554
+ else:
1555
+ # Otherwise local DP size defaults to global DP size if not set
1556
+ data_parallel_size_local = self.data_parallel_size
1557
+
1558
+ # DP address, used in multi-node case for torch distributed group
1559
+ # and ZMQ sockets.
1560
+ if self.data_parallel_address is None:
1561
+ if self.data_parallel_backend == "ray":
1562
+ host_ip = get_ip()
1563
+ logger.info(
1564
+ "Using host IP %s as ray-based data parallel address", host_ip
1565
+ )
1566
+ data_parallel_address = host_ip
1567
+ else:
1568
+ assert self.data_parallel_backend == "mp", (
1569
+ "data_parallel_backend can only be ray or mp, got %s",
1570
+ self.data_parallel_backend,
1571
+ )
1572
+ data_parallel_address = (
1573
+ self.master_addr or ParallelConfig.data_parallel_master_ip
1574
+ )
1575
+ else:
1576
+ data_parallel_address = self.data_parallel_address
1577
+
1578
+ # This port is only used when there are remote data parallel engines,
1579
+ # otherwise the local IPC transport is used.
1580
+ data_parallel_rpc_port = (
1581
+ self.data_parallel_rpc_port
1582
+ if (self.data_parallel_rpc_port is not None)
1583
+ else ParallelConfig.data_parallel_rpc_port
1584
+ )
1585
+
1586
+ if self.tokens_only and not model_config.skip_tokenizer_init:
1587
+ model_config.skip_tokenizer_init = True
1588
+ logger.info("Skipping tokenizer initialization for tokens-only mode.")
1589
+
1590
+ # Forward the deprecated CLI args to the EPLB config.
1591
+ if self.num_redundant_experts is not None:
1592
+ self.eplb_config.num_redundant_experts = self.num_redundant_experts
1593
+ if self.eplb_window_size is not None:
1594
+ self.eplb_config.window_size = self.eplb_window_size
1595
+ if self.eplb_step_interval is not None:
1596
+ self.eplb_config.step_interval = self.eplb_step_interval
1597
+ if self.eplb_log_balancedness is not None:
1598
+ self.eplb_config.log_balancedness = self.eplb_log_balancedness
1599
+
1600
+ parallel_config = ParallelConfig(
1601
+ pipeline_parallel_size=self.pipeline_parallel_size,
1602
+ tensor_parallel_size=self.tensor_parallel_size,
1603
+ data_parallel_size=self.data_parallel_size,
1604
+ data_parallel_rank=self.data_parallel_rank or 0,
1605
+ data_parallel_external_lb=data_parallel_external_lb,
1606
+ data_parallel_size_local=data_parallel_size_local,
1607
+ master_addr=self.master_addr,
1608
+ master_port=self.master_port,
1609
+ nnodes=self.nnodes,
1610
+ node_rank=self.node_rank,
1611
+ data_parallel_master_ip=data_parallel_address,
1612
+ data_parallel_rpc_port=data_parallel_rpc_port,
1613
+ data_parallel_backend=self.data_parallel_backend,
1614
+ data_parallel_hybrid_lb=self.data_parallel_hybrid_lb,
1615
+ enable_expert_parallel=self.enable_expert_parallel,
1616
+ all2all_backend=self.all2all_backend,
1617
+ enable_dbo=self.enable_dbo,
1618
+ dbo_decode_token_threshold=self.dbo_decode_token_threshold,
1619
+ dbo_prefill_token_threshold=self.dbo_prefill_token_threshold,
1620
+ disable_nccl_for_dp_synchronization=self.disable_nccl_for_dp_synchronization,
1621
+ enable_eplb=self.enable_eplb,
1622
+ eplb_config=self.eplb_config,
1623
+ expert_placement_strategy=self.expert_placement_strategy,
1624
+ max_parallel_loading_workers=self.max_parallel_loading_workers,
1625
+ disable_custom_all_reduce=self.disable_custom_all_reduce,
1626
+ ray_workers_use_nsight=self.ray_workers_use_nsight,
1627
+ ray_runtime_env=ray_runtime_env,
1628
+ placement_group=placement_group,
1629
+ distributed_executor_backend=self.distributed_executor_backend,
1630
+ worker_cls=self.worker_cls,
1631
+ worker_extension_cls=self.worker_extension_cls,
1632
+ decode_context_parallel_size=self.decode_context_parallel_size,
1633
+ dcp_kv_cache_interleave_size=self.dcp_kv_cache_interleave_size,
1634
+ _api_process_count=self._api_process_count,
1635
+ _api_process_rank=self._api_process_rank,
1636
+ )
1637
+
1638
+ speculative_config = self.create_speculative_config(
1639
+ target_model_config=model_config,
1640
+ target_parallel_config=parallel_config,
1641
+ )
1642
+
1643
+ # make sure num_lookahead_slots is set appropriately depending on
1644
+ # whether speculative decoding is enabled
1645
+ num_lookahead_slots = self.num_lookahead_slots
1646
+ if speculative_config is not None:
1647
+ num_lookahead_slots = speculative_config.num_lookahead_slots
1648
+
1649
+ scheduler_config = SchedulerConfig(
1650
+ runner_type=model_config.runner_type,
1651
+ max_num_batched_tokens=self.max_num_batched_tokens,
1652
+ max_num_seqs=self.max_num_seqs,
1653
+ max_model_len=model_config.max_model_len,
1654
+ num_lookahead_slots=num_lookahead_slots,
1655
+ enable_chunked_prefill=self.enable_chunked_prefill,
1656
+ disable_chunked_mm_input=self.disable_chunked_mm_input,
1657
+ is_multimodal_model=model_config.is_multimodal_model,
1658
+ is_encoder_decoder=model_config.is_encoder_decoder,
1659
+ policy=self.scheduling_policy,
1660
+ scheduler_cls=self.scheduler_cls,
1661
+ max_num_partial_prefills=self.max_num_partial_prefills,
1662
+ max_long_partial_prefills=self.max_long_partial_prefills,
1663
+ long_prefill_token_threshold=self.long_prefill_token_threshold,
1664
+ disable_hybrid_kv_cache_manager=self.disable_hybrid_kv_cache_manager,
1665
+ async_scheduling=self.async_scheduling,
1666
+ stream_interval=self.stream_interval,
1667
+ )
1668
+
1669
+ if not model_config.is_multimodal_model and self.default_mm_loras:
1670
+ raise ValueError(
1671
+ "Default modality-specific LoRA(s) were provided for a "
1672
+ "non multimodal model"
1673
+ )
1674
+
1675
+ lora_config = (
1676
+ LoRAConfig(
1677
+ max_lora_rank=self.max_lora_rank,
1678
+ max_loras=self.max_loras,
1679
+ default_mm_loras=self.default_mm_loras,
1680
+ fully_sharded_loras=self.fully_sharded_loras,
1681
+ lora_extra_vocab_size=self.lora_extra_vocab_size,
1682
+ lora_dtype=self.lora_dtype,
1683
+ max_cpu_loras=self.max_cpu_loras
1684
+ if self.max_cpu_loras and self.max_cpu_loras > 0
1685
+ else None,
1686
+ )
1687
+ if self.enable_lora
1688
+ else None
1689
+ )
1690
+
1691
+ if (
1692
+ lora_config is not None
1693
+ and speculative_config is not None
1694
+ and scheduler_config.max_num_batched_tokens
1695
+ < (
1696
+ scheduler_config.max_num_seqs
1697
+ * (speculative_config.num_speculative_tokens + 1)
1698
+ )
1699
+ ):
1700
+ raise ValueError(
1701
+ "Consider increasing max_num_batched_tokens or "
1702
+ "decreasing num_speculative_tokens"
1703
+ )
1704
+
1705
+ # bitsandbytes pre-quantized models need a specific model loader
1706
+ if model_config.quantization == "bitsandbytes":
1707
+ self.quantization = self.load_format = "bitsandbytes"
1708
+
1709
+ load_config = self.create_load_config()
1710
+
1711
+ # Pass reasoning_parser into StructuredOutputsConfig
1712
+ if self.reasoning_parser:
1713
+ self.structured_outputs_config.reasoning_parser = self.reasoning_parser
1714
+
1715
+ if self.reasoning_parser_plugin:
1716
+ self.structured_outputs_config.reasoning_parser_plugin = (
1717
+ self.reasoning_parser_plugin
1718
+ )
1719
+
1720
+ # Forward the deprecated CLI args to the StructuredOutputsConfig
1721
+ so_config = self.structured_outputs_config
1722
+ if self.guided_decoding_backend is not None:
1723
+ so_config.guided_decoding_backend = self.guided_decoding_backend
1724
+ if self.guided_decoding_disable_fallback is not None:
1725
+ so_config.disable_fallback = self.guided_decoding_disable_fallback
1726
+ if self.guided_decoding_disable_any_whitespace is not None:
1727
+ so_config.disable_any_whitespace = (
1728
+ self.guided_decoding_disable_any_whitespace
1729
+ )
1730
+ if self.guided_decoding_disable_additional_properties is not None:
1731
+ so_config.disable_additional_properties = (
1732
+ self.guided_decoding_disable_additional_properties
1733
+ )
1734
+
1735
+ observability_config = ObservabilityConfig(
1736
+ show_hidden_metrics_for_version=self.show_hidden_metrics_for_version,
1737
+ otlp_traces_endpoint=self.otlp_traces_endpoint,
1738
+ collect_detailed_traces=self.collect_detailed_traces,
1739
+ )
1740
+
1741
+ # Compilation config overrides
1742
+ compilation_config = copy.deepcopy(self.compilation_config)
1743
+ if self.cuda_graph_sizes is not None:
1744
+ logger.warning(
1745
+ "--cuda-graph-sizes is deprecated and will be removed in v0.13.0 or "
1746
+ "v1.0.0, whichever is soonest. Please use --cudagraph-capture-sizes "
1747
+ "instead."
1748
+ )
1749
+ if compilation_config.cudagraph_capture_sizes is not None:
1750
+ raise ValueError(
1751
+ "cuda_graph_sizes and compilation_config."
1752
+ "cudagraph_capture_sizes are mutually exclusive"
1753
+ )
1754
+ compilation_config.cudagraph_capture_sizes = self.cuda_graph_sizes
1755
+ if self.cudagraph_capture_sizes is not None:
1756
+ if compilation_config.cudagraph_capture_sizes is not None:
1757
+ raise ValueError(
1758
+ "cudagraph_capture_sizes and compilation_config."
1759
+ "cudagraph_capture_sizes are mutually exclusive"
1760
+ )
1761
+ compilation_config.cudagraph_capture_sizes = self.cudagraph_capture_sizes
1762
+ if self.max_cudagraph_capture_size is not None:
1763
+ if compilation_config.max_cudagraph_capture_size is not None:
1764
+ raise ValueError(
1765
+ "max_cudagraph_capture_size and compilation_config."
1766
+ "max_cudagraph_capture_size are mutually exclusive"
1767
+ )
1768
+ compilation_config.max_cudagraph_capture_size = (
1769
+ self.max_cudagraph_capture_size
1770
+ )
1771
+
1772
+ config = VllmConfig(
1773
+ model_config=model_config,
1774
+ cache_config=cache_config,
1775
+ parallel_config=parallel_config,
1776
+ scheduler_config=scheduler_config,
1777
+ device_config=device_config,
1778
+ lora_config=lora_config,
1779
+ speculative_config=speculative_config,
1780
+ load_config=load_config,
1781
+ structured_outputs_config=self.structured_outputs_config,
1782
+ observability_config=observability_config,
1783
+ compilation_config=compilation_config,
1784
+ kv_transfer_config=self.kv_transfer_config,
1785
+ kv_events_config=self.kv_events_config,
1786
+ ec_transfer_config=self.ec_transfer_config,
1787
+ additional_config=self.additional_config,
1788
+ )
1789
+
1790
+ return config
1791
+
1792
+ def _check_feature_supported(self, model_config: ModelConfig):
1793
+ """Raise an error if the feature is not supported."""
1794
+ if self.logits_processor_pattern != EngineArgs.logits_processor_pattern:
1795
+ _raise_unsupported_error(feature_name="--logits-processor-pattern")
1796
+
1797
+ # Concurrent partial prefills are not supported yet.
1798
+ if (
1799
+ self.max_num_partial_prefills != SchedulerConfig.max_num_partial_prefills
1800
+ or self.max_long_partial_prefills
1801
+ != SchedulerConfig.max_long_partial_prefills
1802
+ ):
1803
+ _raise_unsupported_error(feature_name="Concurrent Partial Prefill")
1804
+
1805
+ # N-gram, Medusa, and Eagle are supported for speculative decoding.
1806
+ if self.speculative_config is not None:
1807
+ # speculative_config could still be a dict at this point
1808
+ if isinstance(self.speculative_config, dict):
1809
+ method = self.speculative_config.get("method", None)
1810
+ else:
1811
+ method = self.speculative_config.method
1812
+
1813
+ if method == "draft_model":
1814
+ raise NotImplementedError(
1815
+ "Draft model speculative decoding is not supported yet. "
1816
+ "Please consider using other speculative decoding methods "
1817
+ "such as ngram, medusa, eagle, or mtp."
1818
+ )
1819
+
1820
+ if self.pipeline_parallel_size > 1:
1821
+ supports_pp = getattr(
1822
+ self.distributed_executor_backend, "supports_pp", False
1823
+ )
1824
+ if not supports_pp and self.distributed_executor_backend not in (
1825
+ ParallelConfig.distributed_executor_backend,
1826
+ "ray",
1827
+ "mp",
1828
+ "external_launcher",
1829
+ ):
1830
+ name = (
1831
+ "Pipeline Parallelism without Ray distributed "
1832
+ "executor or multiprocessing executor or external "
1833
+ "launcher"
1834
+ )
1835
+ _raise_unsupported_error(feature_name=name)
1836
+
1837
+ @classmethod
1838
+ def get_chunked_prefill_prefix_caching_defaults(
1839
+ cls,
1840
+ model_config: ModelConfig,
1841
+ ) -> tuple[bool, bool]:
1842
+ if model_config.runner_type != "pooling":
1843
+ default_chunked_prefill = True
1844
+
1845
+ # Disable prefix caching default for hybrid models
1846
+ # since the feature is still experimental.
1847
+ default_prefix_caching = not model_config.is_hybrid
1848
+ else:
1849
+ assert model_config.pooler_config is not None
1850
+
1851
+ pooling_type = model_config.pooler_config.pooling_type
1852
+ incremental_prefill_supported = (
1853
+ pooling_type is not None
1854
+ and pooling_type.lower() == "last"
1855
+ and getattr(model_config.hf_config, "is_causal", True)
1856
+ )
1857
+
1858
+ default_chunked_prefill = incremental_prefill_supported
1859
+ default_prefix_caching = incremental_prefill_supported
1860
+
1861
+ return default_chunked_prefill, default_prefix_caching
1862
+
1863
+ @classmethod
1864
+ def get_batch_defaults(
1865
+ cls,
1866
+ world_size: int,
1867
+ ) -> tuple[dict[UsageContext | None, int], dict[UsageContext | None, int]]:
1868
+ from vllm.usage.usage_lib import UsageContext
1869
+
1870
+ default_max_num_batched_tokens: dict[UsageContext | None, int]
1871
+ default_max_num_seqs: dict[UsageContext | None, int]
1872
+
1873
+ # When no user override, set the default values based on the usage
1874
+ # context.
1875
+ # Use different default values for different hardware.
1876
+
1877
+ # Try to query the device name on the current platform. If it fails,
1878
+ # it may be because the platform that imports vLLM is not the same
1879
+ # as the platform that vLLM is running on (e.g. the case of scaling
1880
+ # vLLM with Ray) and has no GPUs. In this case we use the default
1881
+ # values for non-H100/H200 GPUs.
1882
+ try:
1883
+ device_memory = current_platform.get_device_total_memory()
1884
+ device_name = current_platform.get_device_name().lower()
1885
+ except Exception:
1886
+ # This is only used to set default_max_num_batched_tokens
1887
+ device_memory = 0
1888
+
1889
+ # NOTE(Kuntai): Setting large `max_num_batched_tokens` for A100 reduces
1890
+ # throughput, see PR #17885 for more details.
1891
+ # So here we do an extra device name check to prevent such regression.
1892
+ if device_memory >= 70 * GiB_bytes and "a100" not in device_name:
1893
+ # For GPUs like H100 and MI300x, use larger default values.
1894
+ default_max_num_batched_tokens = {
1895
+ UsageContext.LLM_CLASS: 16384,
1896
+ UsageContext.OPENAI_API_SERVER: 8192,
1897
+ }
1898
+ default_max_num_seqs = {
1899
+ UsageContext.LLM_CLASS: 1024,
1900
+ UsageContext.OPENAI_API_SERVER: 1024,
1901
+ }
1902
+ else:
1903
+ # TODO(woosuk): Tune the default values for other hardware.
1904
+ default_max_num_batched_tokens = {
1905
+ UsageContext.LLM_CLASS: 8192,
1906
+ UsageContext.OPENAI_API_SERVER: 2048,
1907
+ }
1908
+ default_max_num_seqs = {
1909
+ UsageContext.LLM_CLASS: 256,
1910
+ UsageContext.OPENAI_API_SERVER: 256,
1911
+ }
1912
+
1913
+ # tpu specific default values.
1914
+ if current_platform.is_tpu():
1915
+ chip_name = current_platform.get_device_name()
1916
+
1917
+ if chip_name == "V6E":
1918
+ default_max_num_batched_tokens = {
1919
+ UsageContext.LLM_CLASS: 2048,
1920
+ UsageContext.OPENAI_API_SERVER: 1024,
1921
+ }
1922
+ elif chip_name == "V5E":
1923
+ default_max_num_batched_tokens = {
1924
+ UsageContext.LLM_CLASS: 1024,
1925
+ UsageContext.OPENAI_API_SERVER: 512,
1926
+ }
1927
+ elif chip_name == "V5P":
1928
+ default_max_num_batched_tokens = {
1929
+ UsageContext.LLM_CLASS: 512,
1930
+ UsageContext.OPENAI_API_SERVER: 256,
1931
+ }
1932
+
1933
+ # cpu specific default values.
1934
+ if current_platform.is_cpu():
1935
+ default_max_num_batched_tokens = {
1936
+ UsageContext.LLM_CLASS: 4096 * world_size,
1937
+ UsageContext.OPENAI_API_SERVER: 2048 * world_size,
1938
+ }
1939
+ default_max_num_seqs = {
1940
+ UsageContext.LLM_CLASS: 256 * world_size,
1941
+ UsageContext.OPENAI_API_SERVER: 128 * world_size,
1942
+ }
1943
+
1944
+ return default_max_num_batched_tokens, default_max_num_seqs
1945
+
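As a worked example of the CPU branch above (assuming the current platform reports itself as CPU), the defaults scale linearly with the tensor-parallel times pipeline-parallel world size:

    from vllm.usage.usage_lib import UsageContext

    world_size = 2  # e.g. tensor_parallel_size=2, pipeline_parallel_size=1
    tokens, seqs = EngineArgs.get_batch_defaults(world_size)

    # On a CPU platform:
    #   tokens[UsageContext.OPENAI_API_SERVER] == 2048 * 2 == 4096
    #   seqs[UsageContext.OPENAI_API_SERVER]   == 128 * 2  == 256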
1946
+ def _set_default_args(
1947
+ self, usage_context: UsageContext, model_config: ModelConfig
1948
+ ) -> None:
1949
+ """Set Default Arguments for V1 Engine."""
1950
+ (
1951
+ default_chunked_prefill,
1952
+ default_prefix_caching,
1953
+ ) = self.get_chunked_prefill_prefix_caching_defaults(model_config)
1954
+
1955
+ if self.enable_chunked_prefill is None:
1956
+ self.enable_chunked_prefill = default_chunked_prefill
1957
+
1958
+ logger.debug(
1959
+ "%s chunked prefill by default",
1960
+ "Enabling" if default_chunked_prefill else "Disabling",
1961
+ )
1962
+ elif (
1963
+ model_config.runner_type == "pooling"
1964
+ and self.enable_chunked_prefill
1965
+ and not default_chunked_prefill
1966
+ ):
1967
+ logger.warning(
1968
+ "This model does not officially support chunked prefill. "
1969
+ "Enabling this manually may cause the engine to crash "
1970
+ "or produce incorrect outputs.",
1971
+ )
1972
+
1973
+ if self.enable_prefix_caching is None:
1974
+ self.enable_prefix_caching = default_prefix_caching
1975
+
1976
+ logger.debug(
1977
+ "%s prefix caching by default",
1978
+ "Enabling" if default_prefix_caching else "Disabling",
1979
+ )
1980
+ elif (
1981
+ model_config.runner_type == "pooling"
1982
+ and self.enable_prefix_caching
1983
+ and not default_prefix_caching
1984
+ ):
1985
+ logger.warning(
1986
+ "This model does not officially support prefix caching. "
1987
+ "Enabling this manually may cause the engine to crash "
1988
+ "or produce incorrect outputs.",
1989
+ )
1990
+
1991
+ world_size = self.pipeline_parallel_size * self.tensor_parallel_size
1992
+ (
1993
+ default_max_num_batched_tokens,
1994
+ default_max_num_seqs,
1995
+ ) = self.get_batch_defaults(world_size)
1996
+
1997
+ orig_max_num_batched_tokens = self.max_num_batched_tokens
1998
+ orig_max_num_seqs = self.max_num_seqs
1999
+
2000
+ if self.max_num_batched_tokens is None:
2001
+ self.max_num_batched_tokens = default_max_num_batched_tokens.get(
2002
+ usage_context,
2003
+ SchedulerConfig.DEFAULT_MAX_NUM_BATCHED_TOKENS,
2004
+ )
2005
+
2006
+ if self.max_num_seqs is None:
2007
+ self.max_num_seqs = default_max_num_seqs.get(
2008
+ usage_context,
2009
+ SchedulerConfig.DEFAULT_MAX_NUM_SEQS,
2010
+ )
2011
+
2012
+ if orig_max_num_batched_tokens is None:
2013
+ if not self.enable_chunked_prefill:
2014
+ # Without chunked prefill a full prompt must fit in one batch, so take the larger of max_model_len and the default.
2015
+ self.max_num_batched_tokens = max(
2016
+ model_config.max_model_len,
2017
+ self.max_num_batched_tokens,
2018
+ )
2019
+
2020
+ # When using default settings,
2021
+ # ensure max_num_batched_tokens does not exceed the model limit.
2022
+ # Some models (e.g., Whisper) have embeddings tied to max length.
2023
+ self.max_num_batched_tokens = min(
2024
+ self.max_num_seqs * model_config.max_model_len,
2025
+ self.max_num_batched_tokens,
2026
+ )
2027
+
2028
+ logger.debug(
2029
+ "Defaulting max_num_batched_tokens to %d for %s usage context.",
2030
+ self.max_num_batched_tokens,
2031
+ usage_context.value if usage_context else None,
2032
+ )
2033
+
2034
+ if orig_max_num_seqs is None:
2035
+ assert self.max_num_batched_tokens is not None # For type checking
2036
+ self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)
2037
+
2038
+ logger.debug(
2039
+ "Defaulting max_num_seqs to %d for %s usage context.",
2040
+ self.max_num_seqs,
2041
+ usage_context.value if usage_context else None,
2042
+ )
2043
+
2044
+
2045
+ @dataclass
2046
+ class AsyncEngineArgs(EngineArgs):
2047
+ """Arguments for asynchronous vLLM engine."""
2048
+
2049
+ enable_log_requests: bool = False
2050
+
2051
+ @property
2052
+ @deprecated(
2053
+ "`disable_log_requests` is deprecated and has been replaced with "
2054
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
2055
+ "`enable_log_requests` instead."
2056
+ )
2057
+ def disable_log_requests(self) -> bool:
2058
+ return not self.enable_log_requests
2059
+
2060
+ @disable_log_requests.setter
2061
+ @deprecated(
2062
+ "`disable_log_requests` is deprecated and has been replaced with "
2063
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
2064
+ "`enable_log_requests` instead."
2065
+ )
2066
+ def disable_log_requests(self, value: bool):
2067
+ self.enable_log_requests = not value
2068
+
2069
+ @staticmethod
2070
+ def add_cli_args(
2071
+ parser: FlexibleArgumentParser, async_args_only: bool = False
2072
+ ) -> FlexibleArgumentParser:
2073
+ # Initialize plugin to update the parser, for example, The plugin may
2074
+ # add a new kind of quantization method to --quantization argument or
2075
+ # a new device to --device argument.
2076
+ load_general_plugins()
2077
+ if not async_args_only:
2078
+ parser = EngineArgs.add_cli_args(parser)
2079
+ parser.add_argument(
2080
+ "--enable-log-requests",
2081
+ action=argparse.BooleanOptionalAction,
2082
+ default=AsyncEngineArgs.enable_log_requests,
2083
+ help="Enable logging requests.",
2084
+ )
2085
+ parser.add_argument(
2086
+ "--disable-log-requests",
2087
+ action=argparse.BooleanOptionalAction,
2088
+ default=not AsyncEngineArgs.enable_log_requests,
2089
+ help="[DEPRECATED] Disable logging requests.",
2090
+ deprecated=True,
2091
+ )
2092
+ current_platform.pre_register_and_update(parser)
2093
+ return parser
2094
+
2095
+
2096
+ def _raise_unsupported_error(feature_name: str):
2097
+ msg = (
2098
+ f"{feature_name} is not supported. We recommend to "
2099
+ f"remove {feature_name} from your config."
2100
+ )
2101
+ raise NotImplementedError(msg)
2102
+
2103
+
2104
+ def human_readable_int(value):
2105
+ """Parse human-readable integers like '1k', '2M', etc.
2106
+ Decimal values are only supported with decimal (lowercase) multipliers.
2107
+
2108
+ Examples:
2109
+ - '1k' -> 1,000
2110
+ - '1K' -> 1,024
2111
+ - '25.6k' -> 25,600
2112
+ """
2113
+ value = value.strip()
2114
+ match = re.fullmatch(r"(\d+(?:\.\d+)?)([kKmMgGtT])", value)
2115
+ if match:
2116
+ decimal_multiplier = {
2117
+ "k": 10**3,
2118
+ "m": 10**6,
2119
+ "g": 10**9,
2120
+ }
2121
+ binary_multiplier = {
2122
+ "K": 2**10,
2123
+ "M": 2**20,
2124
+ "G": 2**30,
2125
+ }
2126
+
2127
+ number, suffix = match.groups()
2128
+ if suffix in decimal_multiplier:
2129
+ mult = decimal_multiplier[suffix]
2130
+ return int(float(number) * mult)
2131
+ elif suffix in binary_multiplier:
2132
+ mult = binary_multiplier[suffix]
2133
+ # Do not allow decimals with binary multipliers
2134
+ try:
2135
+ return int(number) * mult
2136
+ except ValueError as e:
2137
+ raise argparse.ArgumentTypeError(
2138
+ "Decimals are not allowed "
2139
+ f"with binary suffixes like {suffix}. Did you mean to use "
2140
+ f"{number}{suffix.lower()} instead?"
2141
+ ) from e
2142
+
2143
+ # Regular plain number.
2144
+ return int(value)
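A few examples of the parsing rules implemented above: lowercase suffixes are decimal, uppercase suffixes are binary, and decimals are rejected for binary suffixes.

    human_readable_int("1k")     # -> 1000
    human_readable_int("1K")     # -> 1024
    human_readable_int("25.6k")  # -> 25600
    human_readable_int("2M")     # -> 2 * 2**20 == 2097152
    human_readable_int("123")    # plain integers pass through -> 123
    # human_readable_int("1.5K") raises argparse.ArgumentTypeError, because
    # decimals are only allowed with the decimal (lowercase) suffixes.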