vllm-cpu 0.9.2.post2 (cp311-cp311-manylinux_2_17_aarch64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
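For readers unfamiliar with the wheel naming convention in the header above, the short sketch below (illustrative only, not part of the diff; it assumes the reconstructed filename vllm_cpu-0.9.2.post2-cp311-cp311-manylinux_2_17_aarch64.whl and the third-party "packaging" library) shows how the filename decomposes into distribution, version, and compatibility tags.

    # Minimal sketch: split a wheel filename into its components using packaging.utils.
    # The filename below is reconstructed from the header and is an assumption.
    from packaging.utils import parse_wheel_filename

    name, version, build, tags = parse_wheel_filename(
        "vllm_cpu-0.9.2.post2-cp311-cp311-manylinux_2_17_aarch64.whl"
    )
    print(name)     # vllm-cpu         (normalized distribution name)
    print(version)  # 0.9.2.post2      (PEP 440 version)
    print(sorted(str(t) for t in tags))
    # ['cp311-cp311-manylinux_2_17_aarch64']  (python tag, ABI tag, platform tag)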
Files changed (1236)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +214 -0
  3. vllm/_custom_ops.py +1915 -0
  4. vllm/_ipex_ops.py +350 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +139 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +325 -0
  20. vllm/attention/backends/blocksparse_attn.py +465 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
  23. vllm/attention/backends/flash_attn.py +1008 -0
  24. vllm/attention/backends/flashinfer.py +1107 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +318 -0
  27. vllm/attention/backends/ipex_attn.py +403 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1391 -0
  30. vllm/attention/backends/pallas.py +356 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +1015 -0
  34. vllm/attention/backends/torch_sdpa.py +707 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +807 -0
  38. vllm/attention/layer.py +481 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +903 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/pallas_kv_cache_update.py +120 -0
  52. vllm/attention/ops/prefix_prefill.py +902 -0
  53. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  54. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  55. vllm/attention/ops/triton_decode_attention.py +674 -0
  56. vllm/attention/ops/triton_flash_attention.py +984 -0
  57. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  58. vllm/attention/ops/triton_unified_attention.py +738 -0
  59. vllm/attention/selector.py +214 -0
  60. vllm/attention/utils/fa_utils.py +72 -0
  61. vllm/beam_search.py +87 -0
  62. vllm/benchmarks/__init__.py +0 -0
  63. vllm/benchmarks/datasets.py +1441 -0
  64. vllm/benchmarks/endpoint_request_func.py +393 -0
  65. vllm/benchmarks/latency.py +168 -0
  66. vllm/benchmarks/serve.py +1063 -0
  67. vllm/benchmarks/throughput.py +609 -0
  68. vllm/benchmarks/utils.py +70 -0
  69. vllm/collect_env.py +820 -0
  70. vllm/compilation/__init__.py +0 -0
  71. vllm/compilation/activation_quant_fusion.py +89 -0
  72. vllm/compilation/backends.py +610 -0
  73. vllm/compilation/base_piecewise_backend.py +72 -0
  74. vllm/compilation/collective_fusion.py +127 -0
  75. vllm/compilation/compiler_interface.py +564 -0
  76. vllm/compilation/counter.py +41 -0
  77. vllm/compilation/cuda_piecewise_backend.py +218 -0
  78. vllm/compilation/decorators.py +250 -0
  79. vllm/compilation/fix_functionalization.py +191 -0
  80. vllm/compilation/fusion.py +645 -0
  81. vllm/compilation/fusion_attn.py +166 -0
  82. vllm/compilation/fx_utils.py +84 -0
  83. vllm/compilation/inductor_pass.py +115 -0
  84. vllm/compilation/monitor.py +39 -0
  85. vllm/compilation/multi_output_match.py +109 -0
  86. vllm/compilation/noop_elimination.py +165 -0
  87. vllm/compilation/pass_manager.py +82 -0
  88. vllm/compilation/sequence_parallelism.py +482 -0
  89. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  90. vllm/compilation/vllm_inductor_pass.py +70 -0
  91. vllm/compilation/wrapper.py +135 -0
  92. vllm/config.py +4913 -0
  93. vllm/connections.py +174 -0
  94. vllm/core/__init__.py +0 -0
  95. vllm/core/block/__init__.py +0 -0
  96. vllm/core/block/block_table.py +399 -0
  97. vllm/core/block/common.py +371 -0
  98. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  99. vllm/core/block/interfaces.py +319 -0
  100. vllm/core/block/naive_block.py +466 -0
  101. vllm/core/block/prefix_caching_block.py +1135 -0
  102. vllm/core/block/utils.py +28 -0
  103. vllm/core/block_manager.py +525 -0
  104. vllm/core/evictor.py +157 -0
  105. vllm/core/interfaces.py +139 -0
  106. vllm/core/placeholder_block_space_manager.py +103 -0
  107. vllm/core/scheduler.py +2126 -0
  108. vllm/device_allocator/__init__.py +0 -0
  109. vllm/device_allocator/cumem.py +281 -0
  110. vllm/distributed/__init__.py +6 -0
  111. vllm/distributed/communication_op.py +41 -0
  112. vllm/distributed/device_communicators/__init__.py +0 -0
  113. vllm/distributed/device_communicators/all2all.py +264 -0
  114. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  115. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  116. vllm/distributed/device_communicators/cuda_communicator.py +194 -0
  117. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  118. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  119. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  120. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  121. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  122. vllm/distributed/device_communicators/pynccl.py +218 -0
  123. vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
  124. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  125. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  126. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  127. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  128. vllm/distributed/eplb/__init__.py +8 -0
  129. vllm/distributed/eplb/eplb_state.py +432 -0
  130. vllm/distributed/eplb/rebalance_algo.py +234 -0
  131. vllm/distributed/eplb/rebalance_execute.py +307 -0
  132. vllm/distributed/kv_events.py +356 -0
  133. vllm/distributed/kv_transfer/README.md +29 -0
  134. vllm/distributed/kv_transfer/__init__.py +12 -0
  135. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  137. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  138. vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  140. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  141. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  142. vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
  153. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  154. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  155. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  156. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  158. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  159. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  160. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  161. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  162. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  163. vllm/distributed/parallel_state.py +1385 -0
  164. vllm/distributed/tpu_distributed_utils.py +178 -0
  165. vllm/distributed/utils.py +536 -0
  166. vllm/engine/__init__.py +0 -0
  167. vllm/engine/arg_utils.py +1801 -0
  168. vllm/engine/async_llm_engine.py +1200 -0
  169. vllm/engine/async_timeout.py +173 -0
  170. vllm/engine/llm_engine.py +2101 -0
  171. vllm/engine/metrics.py +629 -0
  172. vllm/engine/metrics_types.py +94 -0
  173. vllm/engine/multiprocessing/__init__.py +148 -0
  174. vllm/engine/multiprocessing/client.py +681 -0
  175. vllm/engine/multiprocessing/engine.py +460 -0
  176. vllm/engine/output_processor/__init__.py +0 -0
  177. vllm/engine/output_processor/interfaces.py +75 -0
  178. vllm/engine/output_processor/multi_step.py +216 -0
  179. vllm/engine/output_processor/single_step.py +145 -0
  180. vllm/engine/output_processor/stop_checker.py +131 -0
  181. vllm/engine/output_processor/util.py +28 -0
  182. vllm/engine/protocol.py +326 -0
  183. vllm/entrypoints/__init__.py +0 -0
  184. vllm/entrypoints/api_server.py +178 -0
  185. vllm/entrypoints/chat_utils.py +1278 -0
  186. vllm/entrypoints/cli/__init__.py +12 -0
  187. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  188. vllm/entrypoints/cli/benchmark/base.py +25 -0
  189. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  190. vllm/entrypoints/cli/benchmark/main.py +58 -0
  191. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  192. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  193. vllm/entrypoints/cli/collect_env.py +36 -0
  194. vllm/entrypoints/cli/main.py +71 -0
  195. vllm/entrypoints/cli/openai.py +201 -0
  196. vllm/entrypoints/cli/run_batch.py +69 -0
  197. vllm/entrypoints/cli/serve.py +265 -0
  198. vllm/entrypoints/cli/types.py +29 -0
  199. vllm/entrypoints/launcher.py +147 -0
  200. vllm/entrypoints/llm.py +1599 -0
  201. vllm/entrypoints/logger.py +50 -0
  202. vllm/entrypoints/openai/__init__.py +0 -0
  203. vllm/entrypoints/openai/api_server.py +1495 -0
  204. vllm/entrypoints/openai/cli_args.py +331 -0
  205. vllm/entrypoints/openai/logits_processors.py +90 -0
  206. vllm/entrypoints/openai/protocol.py +2096 -0
  207. vllm/entrypoints/openai/run_batch.py +473 -0
  208. vllm/entrypoints/openai/serving_chat.py +1258 -0
  209. vllm/entrypoints/openai/serving_classification.py +160 -0
  210. vllm/entrypoints/openai/serving_completion.py +618 -0
  211. vllm/entrypoints/openai/serving_embedding.py +201 -0
  212. vllm/entrypoints/openai/serving_engine.py +988 -0
  213. vllm/entrypoints/openai/serving_models.py +315 -0
  214. vllm/entrypoints/openai/serving_pooling.py +234 -0
  215. vllm/entrypoints/openai/serving_score.py +431 -0
  216. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  217. vllm/entrypoints/openai/serving_transcription.py +132 -0
  218. vllm/entrypoints/openai/speech_to_text.py +395 -0
  219. vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
  220. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  221. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  222. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  223. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  224. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  225. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  226. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  227. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  228. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  229. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
  230. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  231. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  232. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  233. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  234. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
  235. vllm/entrypoints/score_utils.py +50 -0
  236. vllm/entrypoints/ssl.py +75 -0
  237. vllm/entrypoints/utils.py +262 -0
  238. vllm/env_override.py +41 -0
  239. vllm/envs.py +1029 -0
  240. vllm/executor/__init__.py +0 -0
  241. vllm/executor/executor_base.py +401 -0
  242. vllm/executor/mp_distributed_executor.py +244 -0
  243. vllm/executor/msgspec_utils.py +30 -0
  244. vllm/executor/multiproc_worker_utils.py +313 -0
  245. vllm/executor/ray_distributed_executor.py +701 -0
  246. vllm/executor/ray_utils.py +399 -0
  247. vllm/executor/uniproc_executor.py +139 -0
  248. vllm/forward_context.py +185 -0
  249. vllm/inputs/__init__.py +41 -0
  250. vllm/inputs/data.py +331 -0
  251. vllm/inputs/parse.py +151 -0
  252. vllm/inputs/preprocess.py +924 -0
  253. vllm/inputs/registry.py +245 -0
  254. vllm/jsontree.py +80 -0
  255. vllm/logger.py +212 -0
  256. vllm/logging_utils/__init__.py +8 -0
  257. vllm/logging_utils/dump_input.py +81 -0
  258. vllm/logging_utils/formatter.py +18 -0
  259. vllm/logits_process.py +119 -0
  260. vllm/lora/__init__.py +0 -0
  261. vllm/lora/fully_sharded_layers.py +355 -0
  262. vllm/lora/layers.py +1285 -0
  263. vllm/lora/lora.py +199 -0
  264. vllm/lora/models.py +818 -0
  265. vllm/lora/ops/__init__.py +0 -0
  266. vllm/lora/ops/torch_ops/__init__.py +16 -0
  267. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  268. vllm/lora/ops/triton_ops/__init__.py +12 -0
  269. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  270. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  271. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  272. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  273. vllm/lora/ops/triton_ops/utils.py +120 -0
  274. vllm/lora/ops/xla_ops/__init__.py +7 -0
  275. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  276. vllm/lora/peft_helper.py +136 -0
  277. vllm/lora/punica_wrapper/__init__.py +10 -0
  278. vllm/lora/punica_wrapper/punica_base.py +485 -0
  279. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  280. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  281. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  284. vllm/lora/punica_wrapper/utils.py +164 -0
  285. vllm/lora/request.py +99 -0
  286. vllm/lora/resolver.py +85 -0
  287. vllm/lora/utils.py +240 -0
  288. vllm/lora/worker_manager.py +256 -0
  289. vllm/model_executor/__init__.py +16 -0
  290. vllm/model_executor/custom_op.py +208 -0
  291. vllm/model_executor/guided_decoding/__init__.py +181 -0
  292. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  293. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  294. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  295. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  296. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  297. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  298. vllm/model_executor/guided_decoding/utils.py +242 -0
  299. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  300. vllm/model_executor/layers/__init__.py +0 -0
  301. vllm/model_executor/layers/activation.py +420 -0
  302. vllm/model_executor/layers/fused_moe/__init__.py +78 -0
  303. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
  304. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
  305. vllm/model_executor/layers/fused_moe/config.py +456 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  475. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
  476. vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
  477. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
  478. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
  479. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
  480. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
  481. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
  482. vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
  483. vllm/model_executor/layers/fused_moe/layer.py +1528 -0
  484. vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
  485. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
  486. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  487. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  488. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  489. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
  490. vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
  491. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
  492. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
  493. vllm/model_executor/layers/fused_moe/utils.py +144 -0
  494. vllm/model_executor/layers/layernorm.py +287 -0
  495. vllm/model_executor/layers/lightning_attn.py +652 -0
  496. vllm/model_executor/layers/linear.py +1547 -0
  497. vllm/model_executor/layers/logits_processor.py +197 -0
  498. vllm/model_executor/layers/mamba/__init__.py +0 -0
  499. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  500. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  501. vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
  502. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  503. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  504. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  505. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  506. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  507. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  508. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  509. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  510. vllm/model_executor/layers/pooler.py +473 -0
  511. vllm/model_executor/layers/quantization/__init__.py +160 -0
  512. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  513. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  514. vllm/model_executor/layers/quantization/awq.py +228 -0
  515. vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
  516. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  517. vllm/model_executor/layers/quantization/base_config.py +164 -0
  518. vllm/model_executor/layers/quantization/bitblas.py +462 -0
  519. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  520. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  521. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
  522. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
  523. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  524. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  525. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  526. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  527. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  528. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
  529. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  530. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  531. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  532. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  533. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  534. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  535. vllm/model_executor/layers/quantization/deepgemm.py +83 -0
  536. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  537. vllm/model_executor/layers/quantization/experts_int8.py +204 -0
  538. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  539. vllm/model_executor/layers/quantization/fp8.py +950 -0
  540. vllm/model_executor/layers/quantization/gguf.py +577 -0
  541. vllm/model_executor/layers/quantization/gptq.py +278 -0
  542. vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
  543. vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
  544. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  545. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  546. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  547. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  548. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  549. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  550. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  551. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  552. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  553. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
  554. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  555. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  556. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  557. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  558. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  559. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  560. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  561. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  562. vllm/model_executor/layers/quantization/marlin.py +263 -0
  563. vllm/model_executor/layers/quantization/modelopt.py +747 -0
  564. vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
  565. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  566. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  567. vllm/model_executor/layers/quantization/qqq.py +275 -0
  568. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  569. vllm/model_executor/layers/quantization/quark/quark.py +437 -0
  570. vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
  571. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  572. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  573. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  574. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
  575. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  576. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  577. vllm/model_executor/layers/quantization/rtn.py +289 -0
  578. vllm/model_executor/layers/quantization/schema.py +86 -0
  579. vllm/model_executor/layers/quantization/torchao.py +212 -0
  580. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  581. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  582. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  583. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
  787. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  788. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  789. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  790. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  791. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  792. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  793. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  794. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  795. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  796. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  797. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  798. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
  799. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  800. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  801. vllm/model_executor/layers/rejection_sampler.py +406 -0
  802. vllm/model_executor/layers/resampler.py +270 -0
  803. vllm/model_executor/layers/rotary_embedding.py +2025 -0
  804. vllm/model_executor/layers/sampler.py +1204 -0
  805. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  806. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  807. vllm/model_executor/layers/utils.py +116 -0
  808. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  809. vllm/model_executor/model_loader/__init__.py +77 -0
  810. vllm/model_executor/model_loader/base_loader.py +43 -0
  811. vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
  812. vllm/model_executor/model_loader/default_loader.py +282 -0
  813. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  814. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  815. vllm/model_executor/model_loader/neuron.py +476 -0
  816. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  817. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  818. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  819. vllm/model_executor/model_loader/tensorizer.py +602 -0
  820. vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
  821. vllm/model_executor/model_loader/tpu.py +113 -0
  822. vllm/model_executor/model_loader/utils.py +315 -0
  823. vllm/model_executor/model_loader/weight_utils.py +782 -0
  824. vllm/model_executor/models/__init__.py +30 -0
  825. vllm/model_executor/models/adapters.py +375 -0
  826. vllm/model_executor/models/aimv2.py +246 -0
  827. vllm/model_executor/models/arctic.py +559 -0
  828. vllm/model_executor/models/aria.py +670 -0
  829. vllm/model_executor/models/aya_vision.py +486 -0
  830. vllm/model_executor/models/baichuan.py +474 -0
  831. vllm/model_executor/models/bamba.py +558 -0
  832. vllm/model_executor/models/bart.py +938 -0
  833. vllm/model_executor/models/bert.py +513 -0
  834. vllm/model_executor/models/bert_with_rope.py +617 -0
  835. vllm/model_executor/models/blip.py +339 -0
  836. vllm/model_executor/models/blip2.py +728 -0
  837. vllm/model_executor/models/bloom.py +373 -0
  838. vllm/model_executor/models/chameleon.py +1146 -0
  839. vllm/model_executor/models/chatglm.py +478 -0
  840. vllm/model_executor/models/clip.py +407 -0
  841. vllm/model_executor/models/commandr.py +471 -0
  842. vllm/model_executor/models/config.py +200 -0
  843. vllm/model_executor/models/constant_size_cache.py +137 -0
  844. vllm/model_executor/models/dbrx.py +472 -0
  845. vllm/model_executor/models/deepseek.py +486 -0
  846. vllm/model_executor/models/deepseek_mtp.py +281 -0
  847. vllm/model_executor/models/deepseek_v2.py +935 -0
  848. vllm/model_executor/models/deepseek_vl2.py +660 -0
  849. vllm/model_executor/models/dots1.py +536 -0
  850. vllm/model_executor/models/eagle.py +261 -0
  851. vllm/model_executor/models/ernie45.py +43 -0
  852. vllm/model_executor/models/ernie45_moe.py +583 -0
  853. vllm/model_executor/models/exaone.py +551 -0
  854. vllm/model_executor/models/fairseq2_llama.py +154 -0
  855. vllm/model_executor/models/falcon.py +510 -0
  856. vllm/model_executor/models/falcon_h1.py +708 -0
  857. vllm/model_executor/models/florence2.py +1113 -0
  858. vllm/model_executor/models/fuyu.py +406 -0
  859. vllm/model_executor/models/gemma.py +427 -0
  860. vllm/model_executor/models/gemma2.py +427 -0
  861. vllm/model_executor/models/gemma3.py +535 -0
  862. vllm/model_executor/models/gemma3_mm.py +729 -0
  863. vllm/model_executor/models/gemma3n.py +811 -0
  864. vllm/model_executor/models/glm.py +23 -0
  865. vllm/model_executor/models/glm4.py +305 -0
  866. vllm/model_executor/models/glm4_1v.py +1590 -0
  867. vllm/model_executor/models/glm4v.py +657 -0
  868. vllm/model_executor/models/gpt2.py +382 -0
  869. vllm/model_executor/models/gpt_bigcode.py +335 -0
  870. vllm/model_executor/models/gpt_j.py +339 -0
  871. vllm/model_executor/models/gpt_neox.py +332 -0
  872. vllm/model_executor/models/granite.py +493 -0
  873. vllm/model_executor/models/granite_speech.py +790 -0
  874. vllm/model_executor/models/granitemoe.py +437 -0
  875. vllm/model_executor/models/granitemoehybrid.py +653 -0
  876. vllm/model_executor/models/granitemoeshared.py +341 -0
  877. vllm/model_executor/models/gritlm.py +224 -0
  878. vllm/model_executor/models/grok1.py +546 -0
  879. vllm/model_executor/models/h2ovl.py +549 -0
  880. vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
  881. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  882. vllm/model_executor/models/idefics3.py +786 -0
  883. vllm/model_executor/models/interfaces.py +681 -0
  884. vllm/model_executor/models/interfaces_base.py +164 -0
  885. vllm/model_executor/models/intern_vit.py +480 -0
  886. vllm/model_executor/models/internlm2.py +455 -0
  887. vllm/model_executor/models/internlm2_ve.py +147 -0
  888. vllm/model_executor/models/internvl.py +1432 -0
  889. vllm/model_executor/models/jais.py +373 -0
  890. vllm/model_executor/models/jamba.py +592 -0
  891. vllm/model_executor/models/keye.py +1736 -0
  892. vllm/model_executor/models/kimi_vl.py +585 -0
  893. vllm/model_executor/models/llama.py +644 -0
  894. vllm/model_executor/models/llama4.py +531 -0
  895. vllm/model_executor/models/llama_eagle.py +165 -0
  896. vllm/model_executor/models/llama_eagle3.py +263 -0
  897. vllm/model_executor/models/llava.py +887 -0
  898. vllm/model_executor/models/llava_next.py +604 -0
  899. vllm/model_executor/models/llava_next_video.py +492 -0
  900. vllm/model_executor/models/llava_onevision.py +985 -0
  901. vllm/model_executor/models/mamba.py +273 -0
  902. vllm/model_executor/models/mamba2.py +320 -0
  903. vllm/model_executor/models/mamba_cache.py +76 -0
  904. vllm/model_executor/models/medusa.py +219 -0
  905. vllm/model_executor/models/mimo.py +192 -0
  906. vllm/model_executor/models/mimo_mtp.py +285 -0
  907. vllm/model_executor/models/minicpm.py +592 -0
  908. vllm/model_executor/models/minicpm3.py +230 -0
  909. vllm/model_executor/models/minicpm_eagle.py +391 -0
  910. vllm/model_executor/models/minicpmo.py +772 -0
  911. vllm/model_executor/models/minicpmv.py +1307 -0
  912. vllm/model_executor/models/minimax_cache.py +36 -0
  913. vllm/model_executor/models/minimax_text_01.py +1301 -0
  914. vllm/model_executor/models/minimax_vl_01.py +374 -0
  915. vllm/model_executor/models/mistral3.py +624 -0
  916. vllm/model_executor/models/mixtral.py +488 -0
  917. vllm/model_executor/models/mixtral_quant.py +453 -0
  918. vllm/model_executor/models/mllama.py +1682 -0
  919. vllm/model_executor/models/mllama4.py +947 -0
  920. vllm/model_executor/models/mlp_speculator.py +206 -0
  921. vllm/model_executor/models/modernbert.py +339 -0
  922. vllm/model_executor/models/module_mapping.py +72 -0
  923. vllm/model_executor/models/molmo.py +1576 -0
  924. vllm/model_executor/models/moonvit.py +630 -0
  925. vllm/model_executor/models/mpt.py +331 -0
  926. vllm/model_executor/models/nemotron.py +508 -0
  927. vllm/model_executor/models/nemotron_h.py +588 -0
  928. vllm/model_executor/models/nemotron_nas.py +484 -0
  929. vllm/model_executor/models/nvlm_d.py +216 -0
  930. vllm/model_executor/models/olmo.py +389 -0
  931. vllm/model_executor/models/olmo2.py +414 -0
  932. vllm/model_executor/models/olmoe.py +468 -0
  933. vllm/model_executor/models/opt.py +412 -0
  934. vllm/model_executor/models/orion.py +349 -0
  935. vllm/model_executor/models/ovis.py +577 -0
  936. vllm/model_executor/models/paligemma.py +419 -0
  937. vllm/model_executor/models/persimmon.py +344 -0
  938. vllm/model_executor/models/phi.py +356 -0
  939. vllm/model_executor/models/phi3.py +19 -0
  940. vllm/model_executor/models/phi3_small.py +465 -0
  941. vllm/model_executor/models/phi3v.py +733 -0
  942. vllm/model_executor/models/phi4mm.py +1258 -0
  943. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  944. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  945. vllm/model_executor/models/phimoe.py +674 -0
  946. vllm/model_executor/models/pixtral.py +1329 -0
  947. vllm/model_executor/models/plamo2.py +738 -0
  948. vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
  949. vllm/model_executor/models/qwen.py +362 -0
  950. vllm/model_executor/models/qwen2.py +501 -0
  951. vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
  952. vllm/model_executor/models/qwen2_5_vl.py +1175 -0
  953. vllm/model_executor/models/qwen2_audio.py +420 -0
  954. vllm/model_executor/models/qwen2_moe.py +540 -0
  955. vllm/model_executor/models/qwen2_rm.py +122 -0
  956. vllm/model_executor/models/qwen2_vl.py +1513 -0
  957. vllm/model_executor/models/qwen3.py +325 -0
  958. vllm/model_executor/models/qwen3_moe.py +541 -0
  959. vllm/model_executor/models/qwen_vl.py +796 -0
  960. vllm/model_executor/models/registry.py +634 -0
  961. vllm/model_executor/models/roberta.py +271 -0
  962. vllm/model_executor/models/siglip.py +524 -0
  963. vllm/model_executor/models/skyworkr1v.py +961 -0
  964. vllm/model_executor/models/smolvlm.py +52 -0
  965. vllm/model_executor/models/solar.py +506 -0
  966. vllm/model_executor/models/stablelm.py +343 -0
  967. vllm/model_executor/models/starcoder2.py +356 -0
  968. vllm/model_executor/models/tarsier.py +652 -0
  969. vllm/model_executor/models/telechat2.py +140 -0
  970. vllm/model_executor/models/teleflm.py +79 -0
  971. vllm/model_executor/models/transformers.py +509 -0
  972. vllm/model_executor/models/ultravox.py +670 -0
  973. vllm/model_executor/models/utils.py +744 -0
  974. vllm/model_executor/models/vision.py +147 -0
  975. vllm/model_executor/models/whisper.py +886 -0
  976. vllm/model_executor/models/zamba2.py +1036 -0
  977. vllm/model_executor/parameter.py +459 -0
  978. vllm/model_executor/pooling_metadata.py +72 -0
  979. vllm/model_executor/sampling_metadata.py +597 -0
  980. vllm/model_executor/utils.py +80 -0
  981. vllm/multimodal/__init__.py +33 -0
  982. vllm/multimodal/audio.py +116 -0
  983. vllm/multimodal/base.py +219 -0
  984. vllm/multimodal/hasher.py +91 -0
  985. vllm/multimodal/image.py +103 -0
  986. vllm/multimodal/inputs.py +878 -0
  987. vllm/multimodal/parse.py +499 -0
  988. vllm/multimodal/processing.py +1948 -0
  989. vllm/multimodal/profiling.py +283 -0
  990. vllm/multimodal/registry.py +331 -0
  991. vllm/multimodal/utils.py +492 -0
  992. vllm/multimodal/video.py +227 -0
  993. vllm/outputs.py +516 -0
  994. vllm/platforms/__init__.py +291 -0
  995. vllm/platforms/cpu.py +281 -0
  996. vllm/platforms/cuda.py +568 -0
  997. vllm/platforms/hpu.py +106 -0
  998. vllm/platforms/interface.py +551 -0
  999. vllm/platforms/neuron.py +150 -0
  1000. vllm/platforms/rocm.py +453 -0
  1001. vllm/platforms/tpu.py +206 -0
  1002. vllm/platforms/xpu.py +192 -0
  1003. vllm/plugins/__init__.py +94 -0
  1004. vllm/plugins/lora_resolvers/README.md +15 -0
  1005. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1006. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1007. vllm/pooling_params.py +64 -0
  1008. vllm/profiler/__init__.py +0 -0
  1009. vllm/profiler/layerwise_profile.py +375 -0
  1010. vllm/profiler/utils.py +148 -0
  1011. vllm/prompt_adapter/__init__.py +0 -0
  1012. vllm/prompt_adapter/layers.py +83 -0
  1013. vllm/prompt_adapter/models.py +358 -0
  1014. vllm/prompt_adapter/request.py +37 -0
  1015. vllm/prompt_adapter/utils.py +98 -0
  1016. vllm/prompt_adapter/worker_manager.py +179 -0
  1017. vllm/py.typed +2 -0
  1018. vllm/reasoning/__init__.py +15 -0
  1019. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  1020. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1021. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1022. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1023. vllm/sampling_params.py +602 -0
  1024. vllm/scalar_type.py +347 -0
  1025. vllm/scripts.py +15 -0
  1026. vllm/sequence.py +1568 -0
  1027. vllm/spec_decode/__init__.py +0 -0
  1028. vllm/spec_decode/batch_expansion.py +506 -0
  1029. vllm/spec_decode/draft_model_runner.py +349 -0
  1030. vllm/spec_decode/interfaces.py +99 -0
  1031. vllm/spec_decode/medusa_worker.py +138 -0
  1032. vllm/spec_decode/metrics.py +213 -0
  1033. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1034. vllm/spec_decode/mqa_scorer.py +160 -0
  1035. vllm/spec_decode/multi_step_worker.py +423 -0
  1036. vllm/spec_decode/ngram_worker.py +196 -0
  1037. vllm/spec_decode/proposer_worker_base.py +59 -0
  1038. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1039. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1040. vllm/spec_decode/target_model_runner.py +45 -0
  1041. vllm/spec_decode/top1_proposer.py +275 -0
  1042. vllm/spec_decode/util.py +277 -0
  1043. vllm/test_utils.py +130 -0
  1044. vllm/third_party/__init__.py +0 -0
  1045. vllm/third_party/pynvml.py +6140 -0
  1046. vllm/tracing.py +131 -0
  1047. vllm/transformers_utils/__init__.py +24 -0
  1048. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1049. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1050. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1051. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1052. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1053. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1054. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1055. vllm/transformers_utils/config.py +922 -0
  1056. vllm/transformers_utils/configs/__init__.py +57 -0
  1057. vllm/transformers_utils/configs/arctic.py +207 -0
  1058. vllm/transformers_utils/configs/chatglm.py +72 -0
  1059. vllm/transformers_utils/configs/cohere2.py +195 -0
  1060. vllm/transformers_utils/configs/dbrx.py +280 -0
  1061. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1062. vllm/transformers_utils/configs/eagle.py +85 -0
  1063. vllm/transformers_utils/configs/exaone.py +190 -0
  1064. vllm/transformers_utils/configs/falcon.py +90 -0
  1065. vllm/transformers_utils/configs/jais.py +238 -0
  1066. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1067. vllm/transformers_utils/configs/medusa.py +63 -0
  1068. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1069. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1070. vllm/transformers_utils/configs/mllama.py +31 -0
  1071. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1072. vllm/transformers_utils/configs/moonvit.py +33 -0
  1073. vllm/transformers_utils/configs/mpt.py +180 -0
  1074. vllm/transformers_utils/configs/nemotron.py +205 -0
  1075. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1076. vllm/transformers_utils/configs/nvlm_d.py +31 -0
  1077. vllm/transformers_utils/configs/ovis.py +184 -0
  1078. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1079. vllm/transformers_utils/configs/solar.py +247 -0
  1080. vllm/transformers_utils/configs/telechat2.py +64 -0
  1081. vllm/transformers_utils/configs/ultravox.py +108 -0
  1082. vllm/transformers_utils/detokenizer.py +168 -0
  1083. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1084. vllm/transformers_utils/processor.py +221 -0
  1085. vllm/transformers_utils/processors/__init__.py +8 -0
  1086. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1087. vllm/transformers_utils/processors/ovis.py +420 -0
  1088. vllm/transformers_utils/s3_utils.py +162 -0
  1089. vllm/transformers_utils/tokenizer.py +302 -0
  1090. vllm/transformers_utils/tokenizer_base.py +149 -0
  1091. vllm/transformers_utils/tokenizer_group.py +120 -0
  1092. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1093. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1094. vllm/transformers_utils/utils.py +99 -0
  1095. vllm/triton_utils/__init__.py +14 -0
  1096. vllm/triton_utils/importing.py +94 -0
  1097. vllm/usage/__init__.py +0 -0
  1098. vllm/usage/usage_lib.py +259 -0
  1099. vllm/utils/__init__.py +3008 -0
  1100. vllm/v1/__init__.py +0 -0
  1101. vllm/v1/attention/__init__.py +0 -0
  1102. vllm/v1/attention/backends/__init__.py +0 -0
  1103. vllm/v1/attention/backends/cpu_attn.py +184 -0
  1104. vllm/v1/attention/backends/flash_attn.py +757 -0
  1105. vllm/v1/attention/backends/flashinfer.py +680 -0
  1106. vllm/v1/attention/backends/flex_attention.py +491 -0
  1107. vllm/v1/attention/backends/mamba_attn.py +192 -0
  1108. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1109. vllm/v1/attention/backends/mla/common.py +978 -0
  1110. vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
  1111. vllm/v1/attention/backends/mla/flashmla.py +180 -0
  1112. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
  1113. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1114. vllm/v1/attention/backends/pallas.py +320 -0
  1115. vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
  1116. vllm/v1/attention/backends/triton_attn.py +449 -0
  1117. vllm/v1/attention/backends/utils.py +310 -0
  1118. vllm/v1/core/__init__.py +0 -0
  1119. vllm/v1/core/block_pool.py +349 -0
  1120. vllm/v1/core/encoder_cache_manager.py +254 -0
  1121. vllm/v1/core/kv_cache_coordinator.py +369 -0
  1122. vllm/v1/core/kv_cache_manager.py +398 -0
  1123. vllm/v1/core/kv_cache_utils.py +999 -0
  1124. vllm/v1/core/sched/__init__.py +0 -0
  1125. vllm/v1/core/sched/interface.py +150 -0
  1126. vllm/v1/core/sched/output.py +157 -0
  1127. vllm/v1/core/sched/request_queue.py +224 -0
  1128. vllm/v1/core/sched/scheduler.py +1115 -0
  1129. vllm/v1/core/sched/utils.py +36 -0
  1130. vllm/v1/core/single_type_kv_cache_manager.py +444 -0
  1131. vllm/v1/engine/__init__.py +179 -0
  1132. vllm/v1/engine/async_llm.py +626 -0
  1133. vllm/v1/engine/coordinator.py +278 -0
  1134. vllm/v1/engine/core.py +1046 -0
  1135. vllm/v1/engine/core_client.py +1049 -0
  1136. vllm/v1/engine/detokenizer.py +292 -0
  1137. vllm/v1/engine/exceptions.py +17 -0
  1138. vllm/v1/engine/llm_engine.py +322 -0
  1139. vllm/v1/engine/logprobs.py +200 -0
  1140. vllm/v1/engine/mm_input_cache.py +91 -0
  1141. vllm/v1/engine/output_processor.py +477 -0
  1142. vllm/v1/engine/parallel_sampling.py +133 -0
  1143. vllm/v1/engine/processor.py +422 -0
  1144. vllm/v1/engine/utils.py +546 -0
  1145. vllm/v1/executor/__init__.py +0 -0
  1146. vllm/v1/executor/abstract.py +113 -0
  1147. vllm/v1/executor/multiproc_executor.py +532 -0
  1148. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1149. vllm/v1/kv_cache_interface.py +223 -0
  1150. vllm/v1/metrics/__init__.py +0 -0
  1151. vllm/v1/metrics/loggers.py +557 -0
  1152. vllm/v1/metrics/prometheus.py +82 -0
  1153. vllm/v1/metrics/ray_wrappers.py +131 -0
  1154. vllm/v1/metrics/reader.py +246 -0
  1155. vllm/v1/metrics/stats.py +240 -0
  1156. vllm/v1/outputs.py +124 -0
  1157. vllm/v1/pool/__init__.py +0 -0
  1158. vllm/v1/pool/metadata.py +17 -0
  1159. vllm/v1/request.py +229 -0
  1160. vllm/v1/sample/__init__.py +0 -0
  1161. vllm/v1/sample/logits_processor.py +517 -0
  1162. vllm/v1/sample/metadata.py +43 -0
  1163. vllm/v1/sample/ops/__init__.py +0 -0
  1164. vllm/v1/sample/ops/bad_words.py +39 -0
  1165. vllm/v1/sample/ops/penalties.py +43 -0
  1166. vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
  1167. vllm/v1/sample/rejection_sampler.py +631 -0
  1168. vllm/v1/sample/sampler.py +226 -0
  1169. vllm/v1/sample/tpu/__init__.py +0 -0
  1170. vllm/v1/sample/tpu/metadata.py +124 -0
  1171. vllm/v1/sample/tpu/sampler.py +145 -0
  1172. vllm/v1/serial_utils.py +315 -0
  1173. vllm/v1/spec_decode/__init__.py +0 -0
  1174. vllm/v1/spec_decode/eagle.py +441 -0
  1175. vllm/v1/spec_decode/medusa.py +64 -0
  1176. vllm/v1/spec_decode/metadata.py +62 -0
  1177. vllm/v1/spec_decode/metrics.py +178 -0
  1178. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1179. vllm/v1/spec_decode/utils.py +41 -0
  1180. vllm/v1/structured_output/__init__.py +227 -0
  1181. vllm/v1/structured_output/backend_guidance.py +245 -0
  1182. vllm/v1/structured_output/backend_types.py +134 -0
  1183. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1184. vllm/v1/structured_output/request.py +86 -0
  1185. vllm/v1/structured_output/utils.py +175 -0
  1186. vllm/v1/utils.py +377 -0
  1187. vllm/v1/worker/__init__.py +0 -0
  1188. vllm/v1/worker/block_table.py +142 -0
  1189. vllm/v1/worker/cpu_model_runner.py +91 -0
  1190. vllm/v1/worker/cpu_worker.py +153 -0
  1191. vllm/v1/worker/gpu_input_batch.py +757 -0
  1192. vllm/v1/worker/gpu_model_runner.py +2739 -0
  1193. vllm/v1/worker/gpu_worker.py +408 -0
  1194. vllm/v1/worker/lora_model_runner_mixin.py +177 -0
  1195. vllm/v1/worker/tpu_input_batch.py +585 -0
  1196. vllm/v1/worker/tpu_model_runner.py +1849 -0
  1197. vllm/v1/worker/tpu_worker.py +315 -0
  1198. vllm/v1/worker/utils.py +112 -0
  1199. vllm/v1/worker/worker_base.py +65 -0
  1200. vllm/v1/worker/xpu_model_runner.py +33 -0
  1201. vllm/v1/worker/xpu_worker.py +165 -0
  1202. vllm/version.py +41 -0
  1203. vllm/vllm_flash_attn/.gitkeep +0 -0
  1204. vllm/worker/__init__.py +0 -0
  1205. vllm/worker/cache_engine.py +145 -0
  1206. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1207. vllm/worker/cpu_model_runner.py +671 -0
  1208. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1209. vllm/worker/cpu_worker.py +452 -0
  1210. vllm/worker/enc_dec_model_runner.py +555 -0
  1211. vllm/worker/hpu_model_runner.py +2320 -0
  1212. vllm/worker/hpu_worker.py +484 -0
  1213. vllm/worker/model_runner.py +2178 -0
  1214. vllm/worker/model_runner_base.py +282 -0
  1215. vllm/worker/multi_step_hpu_worker.py +123 -0
  1216. vllm/worker/multi_step_model_runner.py +911 -0
  1217. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1218. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1219. vllm/worker/multi_step_tpu_worker.py +108 -0
  1220. vllm/worker/multi_step_worker.py +197 -0
  1221. vllm/worker/neuron_model_runner.py +460 -0
  1222. vllm/worker/neuron_worker.py +193 -0
  1223. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1224. vllm/worker/pooling_model_runner.py +211 -0
  1225. vllm/worker/tpu_model_runner.py +909 -0
  1226. vllm/worker/tpu_worker.py +337 -0
  1227. vllm/worker/utils.py +53 -0
  1228. vllm/worker/worker.py +577 -0
  1229. vllm/worker/worker_base.py +646 -0
  1230. vllm/worker/xpu_model_runner.py +606 -0
  1231. vllm/worker/xpu_worker.py +186 -0
  1232. vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
  1233. vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
  1234. vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
  1235. vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
  1236. vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1801 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ # yapf: disable
+ import argparse
+ import copy
+ import dataclasses
+ import functools
+ import json
+ import sys
+ import threading
+ import warnings
+ from dataclasses import MISSING, dataclass, fields, is_dataclass
+ from itertools import permutations
+ from typing import (Annotated, Any, Callable, Dict, List, Literal, Optional,
+                     Type, TypeVar, Union, cast, get_args, get_origin)
+
+ import regex as re
+ import torch
+ from pydantic import TypeAdapter, ValidationError
+ from typing_extensions import TypeIs, deprecated
+
+ import vllm.envs as envs
+ from vllm.config import (BlockSize, CacheConfig, CacheDType, CompilationConfig,
+                          ConfigFormat, ConfigType, DecodingConfig,
+                          DetailedTraceModules, Device, DeviceConfig,
+                          DistributedExecutorBackend, GuidedDecodingBackend,
+                          GuidedDecodingBackendV1, HfOverrides, KVEventsConfig,
+                          KVTransferConfig, LoadConfig, LoadFormat, LoRAConfig,
+                          ModelConfig, ModelDType, ModelImpl, MultiModalConfig,
+                          ObservabilityConfig, ParallelConfig, PoolerConfig,
+                          PrefixCachingHashAlgo, PromptAdapterConfig,
+                          SchedulerConfig, SchedulerPolicy, SpeculativeConfig,
+                          TaskOption, TokenizerMode, TokenizerPoolConfig,
+                          VllmConfig, get_attr_docs, get_field)
+ from vllm.executor.executor_base import ExecutorBase
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.quantization import QuantizationMethods
+ from vllm.plugins import load_general_plugins
+ from vllm.reasoning import ReasoningParserManager
+ from vllm.test_utils import MODEL_WEIGHTS_S3_BUCKET, MODELS_ON_S3
+ from vllm.transformers_utils.utils import check_gguf_file
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import (STR_DUAL_CHUNK_FLASH_ATTN_VAL, FlexibleArgumentParser,
+                         GiB_bytes, get_ip, is_in_ray_actor)
+
+ # yapf: enable
+
+ logger = init_logger(__name__)
+
+ # object is used to allow for special typing forms
+ T = TypeVar("T")
+ TypeHint = Union[type[Any], object]
+ TypeHintT = Union[type[T], object]
+
+
+ def parse_type(return_type: Callable[[str], T]) -> Callable[[str], T]:
+
+     def _parse_type(val: str) -> T:
+         try:
+             if return_type is json.loads and not re.match("^{.*}$", val):
+                 return cast(T, nullable_kvs(val))
+             return return_type(val)
+         except ValueError as e:
+             raise argparse.ArgumentTypeError(
+                 f"Value {val} cannot be converted to {return_type}.") from e
+
+     return _parse_type
+
+
+ def optional_type(
+         return_type: Callable[[str], T]) -> Callable[[str], Optional[T]]:
+
+     def _optional_type(val: str) -> Optional[T]:
+         if val == "" or val == "None":
+             return None
+         return parse_type(return_type)(val)
+
+     return _optional_type
+
+
+ def union_dict_and_str(val: str) -> Optional[Union[str, dict[str, str]]]:
+     if not re.match("^{.*}$", val):
+         return str(val)
+     return optional_type(json.loads)(val)
+
+
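For orientation only, and not part of the diff itself: a minimal sketch of how the two wrappers above behave once this module is importable; the sample values are made up.

    # optional_type(int) maps the CLI sentinels "" and "None" to None and
    # otherwise defers to int(); union_dict_and_str() keeps plain strings
    # as-is and parses JSON objects into dicts.
    parse_len = optional_type(int)
    assert parse_len("None") is None
    assert parse_len("4096") == 4096
    assert union_dict_and_str("auto") == "auto"
    assert union_dict_and_str('{"backend": "xgrammar"}') == {"backend": "xgrammar"}
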
+ @deprecated(
+     "Passing a JSON argument as a string containing comma separated key=value "
+     "pairs is deprecated. This will be removed in v0.10.0. Please use a JSON "
+     "string instead.")
+ def nullable_kvs(val: str) -> dict[str, int]:
+     """Parses a string containing comma separate key [str] to value [int]
+     pairs into a dictionary.
+
+     Args:
+         val: String value to be parsed.
+
+     Returns:
+         Dictionary with parsed values.
+     """
+     out_dict: dict[str, int] = {}
+     for item in val.split(","):
+         kv_parts = [part.lower().strip() for part in item.split("=")]
+         if len(kv_parts) != 2:
+             raise argparse.ArgumentTypeError(
+                 "Each item should be in the form KEY=VALUE")
+         key, value = kv_parts
+
+         try:
+             parsed_value = int(value)
+         except ValueError as exc:
+             msg = f"Failed to parse value of item {key}={value}"
+             raise argparse.ArgumentTypeError(msg) from exc
+
+         if key in out_dict and out_dict[key] != parsed_value:
+             raise argparse.ArgumentTypeError(
+                 f"Conflicting values specified for key: {key}")
+         out_dict[key] = parsed_value
+
+     return out_dict
+
+
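A quick illustration, not part of the diff, of the deprecated comma-separated form that nullable_kvs still accepts; the keys and values are arbitrary examples.

    # "image=16,video=2" becomes {"image": 16, "video": 2}; a malformed item
    # or a conflicting duplicate key raises argparse.ArgumentTypeError.
    assert nullable_kvs("image=16,video=2") == {"image": 16, "video": 2}
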
+ def is_type(type_hint: TypeHint, type: TypeHintT) -> TypeIs[TypeHintT]:
+     """Check if the type hint is a specific type."""
+     return type_hint is type or get_origin(type_hint) is type
+
+
+ def contains_type(type_hints: set[TypeHint], type: TypeHintT) -> bool:
+     """Check if the type hints contain a specific type."""
+     return any(is_type(type_hint, type) for type_hint in type_hints)
+
+
+ def get_type(type_hints: set[TypeHint], type: TypeHintT) -> TypeHintT:
+     """Get the specific type from the type hints."""
+     return next((th for th in type_hints if is_type(th, type)), None)
+
+
+ def literal_to_kwargs(type_hints: set[TypeHint]) -> dict[str, Any]:
+     """Convert Literal type hints to argparse kwargs."""
+     type_hint = get_type(type_hints, Literal)
+     choices = get_args(type_hint)
+     choice_type = type(choices[0])
+     if not all(isinstance(choice, choice_type) for choice in choices):
+         raise ValueError(
+             "All choices must be of the same type. "
+             f"Got {choices} with types {[type(c) for c in choices]}")
+     return {"type": choice_type, "choices": sorted(choices)}
+
+
+ def is_not_builtin(type_hint: TypeHint) -> bool:
+     """Check if the class is not a built-in type."""
+     return type_hint.__module__ != "builtins"
+
+
+ def get_type_hints(type_hint: TypeHint) -> set[TypeHint]:
+     """Extract type hints from Annotated or Union type hints."""
+     type_hints: set[TypeHint] = set()
+     origin = get_origin(type_hint)
+     args = get_args(type_hint)
+
+     if origin is Annotated:
+         type_hints.update(get_type_hints(args[0]))
+     elif origin is Union:
+         for arg in args:
+             type_hints.update(get_type_hints(arg))
+     else:
+         type_hints.add(type_hint)
+
+     return type_hints
+
+
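To see how these type-hint helpers fit together, here is a small sketch that is not part of the diff, using an arbitrary Optional[Literal[...]] annotation as the example.

    # Optional[Literal[...]] flattens to {Literal[...], NoneType}; the Literal
    # member is then turned into argparse choices of a single concrete type.
    hint = Optional[Literal["auto", "slow"]]
    hints = get_type_hints(hint)
    assert contains_type(hints, Literal)
    assert literal_to_kwargs(hints) == {"type": str, "choices": ["auto", "slow"]}
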
+ @functools.lru_cache(maxsize=30)
+ def _compute_kwargs(cls: ConfigType) -> dict[str, Any]:
+     cls_docs = get_attr_docs(cls)
+     kwargs = {}
+     for field in fields(cls):
+         # Get the set of possible types for the field
+         type_hints: set[TypeHint] = get_type_hints(field.type)
+
+         # If the field is a dataclass, we can use the model_validate_json
+         generator = (th for th in type_hints if is_dataclass(th))
+         dataclass_cls = next(generator, None)
+
+         # Get the default value of the field
+         if field.default is not MISSING:
+             default = field.default
+         elif field.default_factory is not MISSING:
+             default = field.default_factory()
+
+         # Get the help text for the field
+         name = field.name
+         help = cls_docs[name].strip()
+         # Escape % for argparse
+         help = help.replace("%", "%%")
+
+         # Initialise the kwargs dictionary for the field
+         kwargs[name] = {"default": default, "help": help}
+
+         # Set other kwargs based on the type hints
+         json_tip = """\n\nShould either be a valid JSON string or JSON keys
+         passed individually. For example, the following sets of arguments are
+         equivalent:\n\n
+         - `--json-arg '{"key1": "value1", "key2": {"key3": "value2"}}'`\n
+         - `--json-arg.key1 value1 --json-arg.key2.key3 value2`\n
+         Additionally, list elements can be passed individually using '+':
+         - `--json-arg '{"key4": ["value3", "value4", "value5"]}'`\n
+         - `--json-arg.key4+ value3 --json-arg.key4+='value4,value5'`\n\n"""
+         if dataclass_cls is not None:
+
+             def parse_dataclass(val: str, cls=dataclass_cls) -> Any:
+                 try:
+                     if hasattr(cls, "from_cli"):
+                         return cls.from_cli(val)
+                     return TypeAdapter(cls).validate_json(val)
+                 except ValidationError as e:
+                     raise argparse.ArgumentTypeError(repr(e)) from e
+
+             kwargs[name]["type"] = parse_dataclass
+             kwargs[name]["help"] += json_tip
+         elif contains_type(type_hints, bool):
+             # Creates --no-<name> and --<name> flags
+             kwargs[name]["action"] = argparse.BooleanOptionalAction
+         elif contains_type(type_hints, Literal):
+             kwargs[name].update(literal_to_kwargs(type_hints))
+         elif contains_type(type_hints, tuple):
+             type_hint = get_type(type_hints, tuple)
+             types = get_args(type_hint)
+             tuple_type = types[0]
+             assert all(t is tuple_type for t in types if t is not Ellipsis), (
+                 "All non-Ellipsis tuple elements must be of the same "
+                 f"type. Got {types}.")
+             kwargs[name]["type"] = tuple_type
+             kwargs[name]["nargs"] = "+" if Ellipsis in types else len(types)
+         elif contains_type(type_hints, list):
+             type_hint = get_type(type_hints, list)
+             types = get_args(type_hint)
+             assert len(types) == 1, (
+                 "List type must have exactly one type. Got "
+                 f"{type_hint} with types {types}")
+             kwargs[name]["type"] = types[0]
+             kwargs[name]["nargs"] = "+"
+         elif contains_type(type_hints, int):
+             kwargs[name]["type"] = int
+             # Special case for large integers
+             if name in {"max_model_len", "max_num_batched_tokens"}:
+                 kwargs[name]["type"] = human_readable_int
+         elif contains_type(type_hints, float):
+             kwargs[name]["type"] = float
+         elif (contains_type(type_hints, dict)
+               and (contains_type(type_hints, str)
+                    or any(is_not_builtin(th) for th in type_hints))):
+             kwargs[name]["type"] = union_dict_and_str
+         elif contains_type(type_hints, dict):
+             kwargs[name]["type"] = parse_type(json.loads)
+             kwargs[name]["help"] += json_tip
+         elif (contains_type(type_hints, str)
+               or any(is_not_builtin(th) for th in type_hints)):
+             kwargs[name]["type"] = str
+         else:
+             raise ValueError(
+                 f"Unsupported type {type_hints} for argument {name}.")
+
+         # If the type hint was a sequence of literals, use the helper function
+         # to update the type and choices
+         if get_origin(kwargs[name].get("type")) is Literal:
+             kwargs[name].update(literal_to_kwargs({kwargs[name]["type"]}))
+
+         # If None is in type_hints, make the argument optional.
+         # But not if it's a bool, argparse will handle this better.
+         if type(None) in type_hints and not contains_type(type_hints, bool):
+             kwargs[name]["type"] = optional_type(kwargs[name]["type"])
+             if kwargs[name].get("choices"):
+                 kwargs[name]["choices"].append("None")
+     return kwargs
+
+
+ def get_kwargs(cls: ConfigType) -> dict[str, Any]:
+     """Return argparse kwargs for the given Config dataclass.
+
+     The heavy computation is cached via functools.lru_cache, and a deep copy
+     is returned so callers can mutate the dictionary without affecting the
+     cached version.
+     """
+     return copy.deepcopy(_compute_kwargs(cls))
+
+
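Roughly how get_kwargs is meant to be consumed, as a sketch rather than part of the diff: each field of a config dataclass becomes one argparse option whose type, default and help text are derived from the field annotation and its attribute docstring. The group title and the two flags below are illustrative choices, assuming CacheConfig exposes block_size and swap_space fields as the defaults above suggest.

    parser = FlexibleArgumentParser()
    cache_group = parser.add_argument_group("CacheConfig")
    cache_kwargs = get_kwargs(CacheConfig)
    # e.g. --block-size picks up its type, default and help from CacheConfig.block_size
    cache_group.add_argument("--block-size", **cache_kwargs["block_size"])
    cache_group.add_argument("--swap-space", **cache_kwargs["swap_space"])
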
+ @dataclass
+ class EngineArgs:
+     """Arguments for vLLM engine."""
+     model: str = ModelConfig.model
+     served_model_name: Optional[Union[
+         str, List[str]]] = ModelConfig.served_model_name
+     tokenizer: Optional[str] = ModelConfig.tokenizer
+     hf_config_path: Optional[str] = ModelConfig.hf_config_path
+     task: TaskOption = ModelConfig.task
+     skip_tokenizer_init: bool = ModelConfig.skip_tokenizer_init
+     enable_prompt_embeds: bool = ModelConfig.enable_prompt_embeds
+     tokenizer_mode: TokenizerMode = ModelConfig.tokenizer_mode
+     trust_remote_code: bool = ModelConfig.trust_remote_code
+     allowed_local_media_path: str = ModelConfig.allowed_local_media_path
+     download_dir: Optional[str] = LoadConfig.download_dir
+     load_format: str = LoadConfig.load_format
+     config_format: str = ModelConfig.config_format
+     dtype: ModelDType = ModelConfig.dtype
+     kv_cache_dtype: CacheDType = CacheConfig.cache_dtype
+     seed: Optional[int] = ModelConfig.seed
+     max_model_len: Optional[int] = ModelConfig.max_model_len
+     cuda_graph_sizes: list[int] = get_field(SchedulerConfig,
+                                             "cuda_graph_sizes")
+     # Note: Specifying a custom executor backend by passing a class
+     # is intended for expert use only. The API may change without
+     # notice.
+     distributed_executor_backend: Optional[Union[
+         DistributedExecutorBackend,
+         Type[ExecutorBase]]] = ParallelConfig.distributed_executor_backend
+     # number of P/D disaggregation (or other disaggregation) workers
+     pipeline_parallel_size: int = ParallelConfig.pipeline_parallel_size
+     tensor_parallel_size: int = ParallelConfig.tensor_parallel_size
+     data_parallel_size: int = ParallelConfig.data_parallel_size
+     data_parallel_rank: Optional[int] = None
+     data_parallel_size_local: Optional[int] = None
+     data_parallel_address: Optional[str] = None
+     data_parallel_rpc_port: Optional[int] = None
+     data_parallel_backend: str = ParallelConfig.data_parallel_backend
+     enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel
+     enable_eplb: bool = ParallelConfig.enable_eplb
+     num_redundant_experts: int = ParallelConfig.num_redundant_experts
+     eplb_window_size: int = ParallelConfig.eplb_window_size
+     eplb_step_interval: int = ParallelConfig.eplb_step_interval
+     eplb_log_balancedness: bool = ParallelConfig.eplb_log_balancedness
+     max_parallel_loading_workers: Optional[
+         int] = ParallelConfig.max_parallel_loading_workers
+     block_size: Optional[BlockSize] = CacheConfig.block_size
+     enable_prefix_caching: Optional[bool] = CacheConfig.enable_prefix_caching
+     prefix_caching_hash_algo: PrefixCachingHashAlgo = \
+         CacheConfig.prefix_caching_hash_algo
+     disable_sliding_window: bool = ModelConfig.disable_sliding_window
+     disable_cascade_attn: bool = ModelConfig.disable_cascade_attn
+     use_v2_block_manager: bool = True
+     swap_space: float = CacheConfig.swap_space
+     cpu_offload_gb: float = CacheConfig.cpu_offload_gb
+     gpu_memory_utilization: float = CacheConfig.gpu_memory_utilization
+     max_num_batched_tokens: Optional[
+         int] = SchedulerConfig.max_num_batched_tokens
+     max_num_partial_prefills: int = SchedulerConfig.max_num_partial_prefills
+     max_long_partial_prefills: int = SchedulerConfig.max_long_partial_prefills
+     long_prefill_token_threshold: int = \
+         SchedulerConfig.long_prefill_token_threshold
+     max_num_seqs: Optional[int] = SchedulerConfig.max_num_seqs
+     max_logprobs: int = ModelConfig.max_logprobs
+     disable_log_stats: bool = False
+     revision: Optional[str] = ModelConfig.revision
+     code_revision: Optional[str] = ModelConfig.code_revision
+     rope_scaling: dict[str, Any] = get_field(ModelConfig, "rope_scaling")
+     rope_theta: Optional[float] = ModelConfig.rope_theta
+     hf_token: Optional[Union[bool, str]] = ModelConfig.hf_token
+     hf_overrides: HfOverrides = get_field(ModelConfig, "hf_overrides")
+     tokenizer_revision: Optional[str] = ModelConfig.tokenizer_revision
+     quantization: Optional[QuantizationMethods] = ModelConfig.quantization
+     enforce_eager: bool = ModelConfig.enforce_eager
+     max_seq_len_to_capture: int = ModelConfig.max_seq_len_to_capture
+     disable_custom_all_reduce: bool = ParallelConfig.disable_custom_all_reduce
+     # The following three fields are deprecated and will be removed in a future
+     # release. Setting them will have no effect. Please remove them from your
+     # configurations.
+     tokenizer_pool_size: int = TokenizerPoolConfig.pool_size
+     tokenizer_pool_type: str = TokenizerPoolConfig.pool_type
+     tokenizer_pool_extra_config: dict = \
+         get_field(TokenizerPoolConfig, "extra_config")
+     limit_mm_per_prompt: dict[str, int] = \
+         get_field(MultiModalConfig, "limit_per_prompt")
+     media_io_kwargs: dict[str, dict[str,
+                                     Any]] = get_field(MultiModalConfig,
+                                                       "media_io_kwargs")
+     mm_processor_kwargs: Optional[Dict[str, Any]] = \
+         MultiModalConfig.mm_processor_kwargs
+     disable_mm_preprocessor_cache: bool = \
+         MultiModalConfig.disable_mm_preprocessor_cache
+     # LoRA fields
+     enable_lora: bool = False
+     enable_lora_bias: bool = LoRAConfig.bias_enabled
+     max_loras: int = LoRAConfig.max_loras
+     max_lora_rank: int = LoRAConfig.max_lora_rank
+     fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
+     max_cpu_loras: Optional[int] = LoRAConfig.max_cpu_loras
+     lora_dtype: Optional[Union[str, torch.dtype]] = LoRAConfig.lora_dtype
+     lora_extra_vocab_size: int = LoRAConfig.lora_extra_vocab_size
+     long_lora_scaling_factors: Optional[tuple[float, ...]] = \
+         LoRAConfig.long_lora_scaling_factors
+     # PromptAdapter fields
+     enable_prompt_adapter: bool = False
+     max_prompt_adapters: int = PromptAdapterConfig.max_prompt_adapters
+     max_prompt_adapter_token: int = \
+         PromptAdapterConfig.max_prompt_adapter_token
+
+     device: Device = DeviceConfig.device
+     num_scheduler_steps: int = SchedulerConfig.num_scheduler_steps
+     multi_step_stream_outputs: bool = SchedulerConfig.multi_step_stream_outputs
+     ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
401
+ num_gpu_blocks_override: Optional[
402
+ int] = CacheConfig.num_gpu_blocks_override
403
+ num_lookahead_slots: int = SchedulerConfig.num_lookahead_slots
404
+ model_loader_extra_config: dict = \
405
+ get_field(LoadConfig, "model_loader_extra_config")
406
+ ignore_patterns: Optional[Union[str,
407
+ List[str]]] = LoadConfig.ignore_patterns
408
+ preemption_mode: Optional[str] = SchedulerConfig.preemption_mode
409
+
410
+ scheduler_delay_factor: float = SchedulerConfig.delay_factor
411
+ enable_chunked_prefill: Optional[
412
+ bool] = SchedulerConfig.enable_chunked_prefill
413
+ disable_chunked_mm_input: bool = SchedulerConfig.disable_chunked_mm_input
414
+
415
+ disable_hybrid_kv_cache_manager: bool = (
416
+ SchedulerConfig.disable_hybrid_kv_cache_manager)
417
+
418
+ guided_decoding_backend: GuidedDecodingBackend = DecodingConfig.backend
419
+ guided_decoding_disable_fallback: bool = DecodingConfig.disable_fallback
420
+ guided_decoding_disable_any_whitespace: bool = \
421
+ DecodingConfig.disable_any_whitespace
422
+ guided_decoding_disable_additional_properties: bool = \
423
+ DecodingConfig.disable_additional_properties
424
+ logits_processor_pattern: Optional[
425
+ str] = ModelConfig.logits_processor_pattern
426
+
427
+ speculative_config: Optional[Dict[str, Any]] = None
428
+
429
+ qlora_adapter_name_or_path: Optional[str] = None
430
+ show_hidden_metrics_for_version: Optional[str] = \
431
+ ObservabilityConfig.show_hidden_metrics_for_version
432
+ otlp_traces_endpoint: Optional[str] = \
433
+ ObservabilityConfig.otlp_traces_endpoint
434
+ collect_detailed_traces: Optional[list[DetailedTraceModules]] = \
435
+ ObservabilityConfig.collect_detailed_traces
436
+ disable_async_output_proc: bool = not ModelConfig.use_async_output_proc
437
+ scheduling_policy: SchedulerPolicy = SchedulerConfig.policy
438
+ scheduler_cls: Union[str, Type[object]] = SchedulerConfig.scheduler_cls
439
+
440
+ override_neuron_config: dict[str, Any] = \
441
+ get_field(ModelConfig, "override_neuron_config")
442
+ override_pooler_config: Optional[Union[dict, PoolerConfig]] = \
443
+ ModelConfig.override_pooler_config
444
+ compilation_config: CompilationConfig = \
445
+ get_field(VllmConfig, "compilation_config")
446
+ worker_cls: str = ParallelConfig.worker_cls
447
+ worker_extension_cls: str = ParallelConfig.worker_extension_cls
448
+
449
+ kv_transfer_config: Optional[KVTransferConfig] = None
450
+ kv_events_config: Optional[KVEventsConfig] = None
451
+
452
+ generation_config: str = ModelConfig.generation_config
453
+ enable_sleep_mode: bool = ModelConfig.enable_sleep_mode
454
+ override_generation_config: dict[str, Any] = \
455
+ get_field(ModelConfig, "override_generation_config")
456
+ model_impl: str = ModelConfig.model_impl
457
+ override_attention_dtype: str = ModelConfig.override_attention_dtype
458
+
459
+ calculate_kv_scales: bool = CacheConfig.calculate_kv_scales
460
+
461
+ additional_config: dict[str, Any] = \
462
+ get_field(VllmConfig, "additional_config")
463
+ enable_reasoning: Optional[bool] = None # DEPRECATED
464
+ reasoning_parser: str = DecodingConfig.reasoning_backend
465
+
466
+ use_tqdm_on_load: bool = LoadConfig.use_tqdm_on_load
467
+ pt_load_map_location: str = LoadConfig.pt_load_map_location
468
+
469
+ enable_multimodal_encoder_data_parallel: bool = \
470
+ ParallelConfig.enable_multimodal_encoder_data_parallel
471
+
472
+ def __post_init__(self):
473
+ # support `EngineArgs(compilation_config={...})`
474
+ # without having to manually construct a
475
+ # CompilationConfig object
476
+ if isinstance(self.compilation_config, (int, dict)):
477
+ self.compilation_config = CompilationConfig.from_cli(
478
+ str(self.compilation_config))
479
+ if self.qlora_adapter_name_or_path is not None:
480
+ warnings.warn(
481
+ "The `qlora_adapter_name_or_path` is deprecated "
482
+ "and will be removed in v0.10.0. ",
483
+ DeprecationWarning,
484
+ stacklevel=2,
485
+ )
486
+ # Setup plugins
487
+ from vllm.plugins import load_general_plugins
488
+ load_general_plugins()
489
+
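+ # Illustrative sketch (an assumption, not from the original source): the
+ # int/dict shorthand handled in __post_init__ above means these two calls
+ # should be equivalent, assuming CompilationConfig.from_cli treats a bare
+ # integer as the compilation level:
+ #
+ #     EngineArgs(model="facebook/opt-125m", compilation_config=3)
+ #     EngineArgs(model="facebook/opt-125m",
+ #                compilation_config={"level": 3})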
490
+ @staticmethod
491
+ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
492
+ """Shared CLI arguments for vLLM engine."""
493
+
494
+ # Model arguments
495
+ model_kwargs = get_kwargs(ModelConfig)
496
+ model_group = parser.add_argument_group(
497
+ title="ModelConfig",
498
+ description=ModelConfig.__doc__,
499
+ )
500
+ if not ('serve' in sys.argv[1:] and '--help' in sys.argv[1:]):
501
+ model_group.add_argument("--model", **model_kwargs["model"])
502
+ model_group.add_argument("--task", **model_kwargs["task"])
503
+ model_group.add_argument("--tokenizer", **model_kwargs["tokenizer"])
504
+ model_group.add_argument("--tokenizer-mode",
505
+ **model_kwargs["tokenizer_mode"])
506
+ model_group.add_argument("--trust-remote-code",
507
+ **model_kwargs["trust_remote_code"])
508
+ model_group.add_argument("--dtype", **model_kwargs["dtype"])
509
+ model_group.add_argument("--seed", **model_kwargs["seed"])
510
+ model_group.add_argument("--hf-config-path",
511
+ **model_kwargs["hf_config_path"])
512
+ model_group.add_argument("--allowed-local-media-path",
513
+ **model_kwargs["allowed_local_media_path"])
514
+ model_group.add_argument("--revision", **model_kwargs["revision"])
515
+ model_group.add_argument("--code-revision",
516
+ **model_kwargs["code_revision"])
517
+ model_group.add_argument("--rope-scaling",
518
+ **model_kwargs["rope_scaling"])
519
+ model_group.add_argument("--rope-theta", **model_kwargs["rope_theta"])
520
+ model_group.add_argument("--tokenizer-revision",
521
+ **model_kwargs["tokenizer_revision"])
522
+ model_group.add_argument("--max-model-len",
523
+ **model_kwargs["max_model_len"])
524
+ model_group.add_argument("--quantization", "-q",
525
+ **model_kwargs["quantization"])
526
+ model_group.add_argument("--enforce-eager",
527
+ **model_kwargs["enforce_eager"])
528
+ model_group.add_argument("--max-seq-len-to-capture",
529
+ **model_kwargs["max_seq_len_to_capture"])
530
+ model_group.add_argument("--max-logprobs",
531
+ **model_kwargs["max_logprobs"])
532
+ model_group.add_argument("--disable-sliding-window",
533
+ **model_kwargs["disable_sliding_window"])
534
+ model_group.add_argument("--disable-cascade-attn",
535
+ **model_kwargs["disable_cascade_attn"])
536
+ model_group.add_argument("--skip-tokenizer-init",
537
+ **model_kwargs["skip_tokenizer_init"])
538
+ model_group.add_argument("--enable-prompt-embeds",
539
+ **model_kwargs["enable_prompt_embeds"])
540
+ model_group.add_argument("--served-model-name",
541
+ **model_kwargs["served_model_name"])
542
+ # This one is a special case because it is the
543
+ # opposite of ModelConfig.use_async_output_proc
544
+ model_group.add_argument(
545
+ "--disable-async-output-proc",
546
+ action="store_true",
547
+ default=EngineArgs.disable_async_output_proc,
548
+ help="Disable async output processing. This may result in "
549
+ "lower performance.")
550
+ model_group.add_argument("--config-format",
551
+ choices=[f.value for f in ConfigFormat],
552
+ **model_kwargs["config_format"])
553
+ # This one is a special case because it can be a bool
+ # or a str. TODO: Handle this in get_kwargs
555
+ model_group.add_argument("--hf-token",
556
+ type=str,
557
+ nargs="?",
558
+ const=True,
559
+ default=model_kwargs["hf_token"]["default"],
560
+ help=model_kwargs["hf_token"]["help"])
561
+ model_group.add_argument("--hf-overrides",
562
+ **model_kwargs["hf_overrides"])
563
+ model_group.add_argument("--override-neuron-config",
564
+ **model_kwargs["override_neuron_config"])
565
+ model_group.add_argument("--override-pooler-config",
566
+ **model_kwargs["override_pooler_config"])
567
+ model_group.add_argument("--logits-processor-pattern",
568
+ **model_kwargs["logits_processor_pattern"])
569
+ model_group.add_argument("--generation-config",
570
+ **model_kwargs["generation_config"])
571
+ model_group.add_argument("--override-generation-config",
572
+ **model_kwargs["override_generation_config"])
573
+ model_group.add_argument("--enable-sleep-mode",
574
+ **model_kwargs["enable_sleep_mode"])
575
+ model_group.add_argument("--model-impl",
576
+ choices=[f.value for f in ModelImpl],
577
+ **model_kwargs["model_impl"])
578
+ model_group.add_argument("--override-attention-dtype",
579
+ **model_kwargs["override_attention_dtype"])
580
+
581
+ # Model loading arguments
582
+ load_kwargs = get_kwargs(LoadConfig)
583
+ load_group = parser.add_argument_group(
584
+ title="LoadConfig",
585
+ description=LoadConfig.__doc__,
586
+ )
587
+ load_group.add_argument("--load-format",
588
+ choices=[f.value for f in LoadFormat],
589
+ **load_kwargs["load_format"])
590
+ load_group.add_argument("--download-dir",
591
+ **load_kwargs["download_dir"])
592
+ load_group.add_argument("--model-loader-extra-config",
593
+ **load_kwargs["model_loader_extra_config"])
594
+ load_group.add_argument("--ignore-patterns",
595
+ **load_kwargs["ignore_patterns"])
596
+ load_group.add_argument("--use-tqdm-on-load",
597
+ **load_kwargs["use_tqdm_on_load"])
598
+ load_group.add_argument(
599
+ "--qlora-adapter-name-or-path",
600
+ type=str,
601
+ default=None,
602
+ help="The `--qlora-adapter-name-or-path` has no effect, do not set"
603
+ " it, and it will be removed in v0.10.0.",
604
+ deprecated=True,
605
+ )
606
+ load_group.add_argument('--pt-load-map-location',
607
+ **load_kwargs["pt_load_map_location"])
608
+
609
+ # Guided decoding arguments
610
+ guided_decoding_kwargs = get_kwargs(DecodingConfig)
611
+ guided_decoding_group = parser.add_argument_group(
612
+ title="DecodingConfig",
613
+ description=DecodingConfig.__doc__,
614
+ )
615
+ guided_decoding_group.add_argument("--guided-decoding-backend",
616
+ **guided_decoding_kwargs["backend"])
617
+ guided_decoding_group.add_argument(
618
+ "--guided-decoding-disable-fallback",
619
+ **guided_decoding_kwargs["disable_fallback"])
620
+ guided_decoding_group.add_argument(
621
+ "--guided-decoding-disable-any-whitespace",
622
+ **guided_decoding_kwargs["disable_any_whitespace"])
623
+ guided_decoding_group.add_argument(
624
+ "--guided-decoding-disable-additional-properties",
625
+ **guided_decoding_kwargs["disable_additional_properties"])
626
+ guided_decoding_group.add_argument(
627
+ "--enable-reasoning",
628
+ action=argparse.BooleanOptionalAction,
629
+ deprecated=True,
630
+ help="[DEPRECATED] The `--enable-reasoning` flag is deprecated as "
631
+ "of v0.9.0. Use `--reasoning-parser` to specify the reasoning "
632
+ "parser backend instead. This flag (`--enable-reasoning`) will be "
633
+ "removed in v0.10.0. When `--reasoning-parser` is specified, "
634
+ "reasoning mode is automatically enabled.")
635
+ guided_decoding_group.add_argument(
636
+ "--reasoning-parser",
637
+ # The choices here are a special case because they are not static
638
+ choices=list(ReasoningParserManager.reasoning_parsers),
639
+ **guided_decoding_kwargs["reasoning_backend"])
640
+
641
+ # Parallel arguments
642
+ parallel_kwargs = get_kwargs(ParallelConfig)
643
+ parallel_group = parser.add_argument_group(
644
+ title="ParallelConfig",
645
+ description=ParallelConfig.__doc__,
646
+ )
647
+ parallel_group.add_argument(
648
+ "--distributed-executor-backend",
649
+ **parallel_kwargs["distributed_executor_backend"])
650
+ parallel_group.add_argument(
651
+ "--pipeline-parallel-size", "-pp",
652
+ **parallel_kwargs["pipeline_parallel_size"])
653
+ parallel_group.add_argument("--tensor-parallel-size", "-tp",
654
+ **parallel_kwargs["tensor_parallel_size"])
655
+ parallel_group.add_argument("--data-parallel-size", "-dp",
656
+ **parallel_kwargs["data_parallel_size"])
657
+ parallel_group.add_argument(
658
+ '--data-parallel-rank',
659
+ '-dpn',
660
+ type=int,
661
+ help='Data parallel rank of this instance. '
662
+ 'When set, enables external load balancer mode.')
663
+ parallel_group.add_argument('--data-parallel-size-local',
664
+ '-dpl',
665
+ type=int,
666
+ help='Number of data parallel replicas '
667
+ 'to run on this node.')
668
+ parallel_group.add_argument('--data-parallel-address',
669
+ '-dpa',
670
+ type=str,
671
+ help='Address of data parallel cluster '
672
+ 'head-node.')
673
+ parallel_group.add_argument('--data-parallel-rpc-port',
674
+ '-dpp',
675
+ type=int,
676
+ help='Port for data parallel RPC '
677
+ 'communication.')
678
+ parallel_group.add_argument('--data-parallel-backend',
679
+ '-dpb',
680
+ type=str,
681
+ default='mp',
682
+ help='Backend for data parallel, either '
683
+ '"mp" or "ray".')
684
+ parallel_group.add_argument(
685
+ "--enable-expert-parallel",
686
+ **parallel_kwargs["enable_expert_parallel"])
687
+ parallel_group.add_argument("--enable-eplb",
688
+ **parallel_kwargs["enable_eplb"])
689
+ parallel_group.add_argument("--num-redundant-experts",
690
+ **parallel_kwargs["num_redundant_experts"])
691
+ parallel_group.add_argument("--eplb-window-size",
692
+ **parallel_kwargs["eplb_window_size"])
693
+ parallel_group.add_argument("--eplb-step-interval",
694
+ **parallel_kwargs["eplb_step_interval"])
695
+ parallel_group.add_argument("--eplb-log-balancedness",
696
+ **parallel_kwargs["eplb_log_balancedness"])
697
+ parallel_group.add_argument(
698
+ "--max-parallel-loading-workers",
699
+ **parallel_kwargs["max_parallel_loading_workers"])
700
+ parallel_group.add_argument(
701
+ "--ray-workers-use-nsight",
702
+ **parallel_kwargs["ray_workers_use_nsight"])
703
+ parallel_group.add_argument(
704
+ "--disable-custom-all-reduce",
705
+ **parallel_kwargs["disable_custom_all_reduce"])
706
+ parallel_group.add_argument("--worker-cls",
707
+ **parallel_kwargs["worker_cls"])
708
+ parallel_group.add_argument("--worker-extension-cls",
709
+ **parallel_kwargs["worker_extension_cls"])
710
+ parallel_group.add_argument(
711
+ "--enable-multimodal-encoder-data-parallel",
712
+ **parallel_kwargs["enable_multimodal_encoder_data_parallel"])
713
+
714
+ # KV cache arguments
715
+ cache_kwargs = get_kwargs(CacheConfig)
716
+ cache_group = parser.add_argument_group(
717
+ title="CacheConfig",
718
+ description=CacheConfig.__doc__,
719
+ )
720
+ cache_group.add_argument("--block-size", **cache_kwargs["block_size"])
721
+ cache_group.add_argument("--gpu-memory-utilization",
722
+ **cache_kwargs["gpu_memory_utilization"])
723
+ cache_group.add_argument("--swap-space", **cache_kwargs["swap_space"])
724
+ cache_group.add_argument("--kv-cache-dtype",
725
+ **cache_kwargs["cache_dtype"])
726
+ cache_group.add_argument("--num-gpu-blocks-override",
727
+ **cache_kwargs["num_gpu_blocks_override"])
728
+ cache_group.add_argument("--enable-prefix-caching",
729
+ **cache_kwargs["enable_prefix_caching"])
730
+ cache_group.add_argument("--prefix-caching-hash-algo",
731
+ **cache_kwargs["prefix_caching_hash_algo"])
732
+ cache_group.add_argument("--cpu-offload-gb",
733
+ **cache_kwargs["cpu_offload_gb"])
734
+ cache_group.add_argument("--calculate-kv-scales",
735
+ **cache_kwargs["calculate_kv_scales"])
736
+
737
+ # Tokenizer arguments
738
+ tokenizer_kwargs = get_kwargs(TokenizerPoolConfig)
739
+ tokenizer_group = parser.add_argument_group(
740
+ title="TokenizerPoolConfig",
741
+ description=TokenizerPoolConfig.__doc__,
742
+ )
743
+ tokenizer_group.add_argument("--tokenizer-pool-size",
744
+ **tokenizer_kwargs["pool_size"])
745
+ tokenizer_group.add_argument("--tokenizer-pool-type",
746
+ **tokenizer_kwargs["pool_type"])
747
+ tokenizer_group.add_argument("--tokenizer-pool-extra-config",
748
+ **tokenizer_kwargs["extra_config"])
749
+
750
+ # Multimodal related configs
751
+ multimodal_kwargs = get_kwargs(MultiModalConfig)
752
+ multimodal_group = parser.add_argument_group(
753
+ title="MultiModalConfig",
754
+ description=MultiModalConfig.__doc__,
755
+ )
756
+ multimodal_group.add_argument("--limit-mm-per-prompt",
757
+ **multimodal_kwargs["limit_per_prompt"])
758
+ multimodal_group.add_argument("--media-io-kwargs",
759
+ **multimodal_kwargs["media_io_kwargs"])
760
+ multimodal_group.add_argument(
761
+ "--mm-processor-kwargs",
762
+ **multimodal_kwargs["mm_processor_kwargs"])
763
+ multimodal_group.add_argument(
764
+ "--disable-mm-preprocessor-cache",
765
+ **multimodal_kwargs["disable_mm_preprocessor_cache"])
766
+
767
+ # LoRA related configs
768
+ lora_kwargs = get_kwargs(LoRAConfig)
769
+ lora_group = parser.add_argument_group(
770
+ title="LoRAConfig",
771
+ description=LoRAConfig.__doc__,
772
+ )
773
+ lora_group.add_argument(
774
+ "--enable-lora",
775
+ action=argparse.BooleanOptionalAction,
776
+ help="If True, enable handling of LoRA adapters.")
777
+ lora_group.add_argument("--enable-lora-bias",
778
+ **lora_kwargs["bias_enabled"])
779
+ lora_group.add_argument("--max-loras", **lora_kwargs["max_loras"])
780
+ lora_group.add_argument("--max-lora-rank",
781
+ **lora_kwargs["max_lora_rank"])
782
+ lora_group.add_argument("--lora-extra-vocab-size",
783
+ **lora_kwargs["lora_extra_vocab_size"])
784
+ lora_group.add_argument(
785
+ "--lora-dtype",
786
+ **lora_kwargs["lora_dtype"],
787
+ )
788
+ lora_group.add_argument("--long-lora-scaling-factors",
789
+ **lora_kwargs["long_lora_scaling_factors"])
790
+ lora_group.add_argument("--max-cpu-loras",
791
+ **lora_kwargs["max_cpu_loras"])
792
+ lora_group.add_argument("--fully-sharded-loras",
793
+ **lora_kwargs["fully_sharded_loras"])
794
+
795
+ # PromptAdapter related configs
796
+ prompt_adapter_kwargs = get_kwargs(PromptAdapterConfig)
797
+ prompt_adapter_group = parser.add_argument_group(
798
+ title="PromptAdapterConfig",
799
+ description=PromptAdapterConfig.__doc__,
800
+ )
801
+ prompt_adapter_group.add_argument(
802
+ "--enable-prompt-adapter",
803
+ action=argparse.BooleanOptionalAction,
804
+ help="If True, enable handling of PromptAdapters.")
805
+ prompt_adapter_group.add_argument(
806
+ "--max-prompt-adapters",
807
+ **prompt_adapter_kwargs["max_prompt_adapters"])
808
+ prompt_adapter_group.add_argument(
809
+ "--max-prompt-adapter-token",
810
+ **prompt_adapter_kwargs["max_prompt_adapter_token"])
811
+
812
+ # Device arguments
813
+ device_kwargs = get_kwargs(DeviceConfig)
814
+ device_group = parser.add_argument_group(
815
+ title="DeviceConfig",
816
+ description=DeviceConfig.__doc__,
817
+ )
818
+ device_group.add_argument("--device",
819
+ **device_kwargs["device"],
820
+ deprecated=True)
821
+
822
+ # Speculative arguments
823
+ speculative_group = parser.add_argument_group(
824
+ title="SpeculativeConfig",
825
+ description=SpeculativeConfig.__doc__,
826
+ )
827
+ speculative_group.add_argument(
828
+ "--speculative-config",
829
+ type=json.loads,
830
+ default=None,
831
+ help="The configurations for speculative decoding. Should be a "
832
+ "JSON string.")
833
+
834
+ # Observability arguments
835
+ observability_kwargs = get_kwargs(ObservabilityConfig)
836
+ observability_group = parser.add_argument_group(
837
+ title="ObservabilityConfig",
838
+ description=ObservabilityConfig.__doc__,
839
+ )
840
+ observability_group.add_argument(
841
+ "--show-hidden-metrics-for-version",
842
+ **observability_kwargs["show_hidden_metrics_for_version"])
843
+ observability_group.add_argument(
844
+ "--otlp-traces-endpoint",
845
+ **observability_kwargs["otlp_traces_endpoint"])
846
+ # TODO: generalise this special case
847
+ choices = observability_kwargs["collect_detailed_traces"]["choices"]
848
+ metavar = f"{{{','.join(choices)}}}"
849
+ observability_kwargs["collect_detailed_traces"]["metavar"] = metavar
850
+ observability_kwargs["collect_detailed_traces"]["choices"] += [
851
+ ",".join(p)
852
+ for p in permutations(get_args(DetailedTraceModules), r=2)
853
+ ]
854
+ observability_group.add_argument(
855
+ "--collect-detailed-traces",
856
+ **observability_kwargs["collect_detailed_traces"])
857
+
858
+ # Scheduler arguments
859
+ scheduler_kwargs = get_kwargs(SchedulerConfig)
860
+ scheduler_group = parser.add_argument_group(
861
+ title="SchedulerConfig",
862
+ description=SchedulerConfig.__doc__,
863
+ )
864
+ scheduler_group.add_argument(
865
+ "--max-num-batched-tokens",
866
+ **scheduler_kwargs["max_num_batched_tokens"])
867
+ scheduler_group.add_argument("--max-num-seqs",
868
+ **scheduler_kwargs["max_num_seqs"])
869
+ scheduler_group.add_argument(
870
+ "--max-num-partial-prefills",
871
+ **scheduler_kwargs["max_num_partial_prefills"])
872
+ scheduler_group.add_argument(
873
+ "--max-long-partial-prefills",
874
+ **scheduler_kwargs["max_long_partial_prefills"])
875
+ scheduler_group.add_argument('--cuda-graph-sizes',
876
+ **scheduler_kwargs["cuda_graph_sizes"])
877
+ scheduler_group.add_argument(
878
+ "--long-prefill-token-threshold",
879
+ **scheduler_kwargs["long_prefill_token_threshold"])
880
+ scheduler_group.add_argument("--num-lookahead-slots",
881
+ **scheduler_kwargs["num_lookahead_slots"])
882
+ scheduler_group.add_argument("--scheduler-delay-factor",
883
+ **scheduler_kwargs["delay_factor"])
884
+ scheduler_group.add_argument("--preemption-mode",
885
+ **scheduler_kwargs["preemption_mode"])
886
+ scheduler_group.add_argument("--num-scheduler-steps",
887
+ **scheduler_kwargs["num_scheduler_steps"])
888
+ scheduler_group.add_argument(
889
+ "--multi-step-stream-outputs",
890
+ **scheduler_kwargs["multi_step_stream_outputs"])
891
+ scheduler_group.add_argument("--scheduling-policy",
892
+ **scheduler_kwargs["policy"])
893
+ scheduler_group.add_argument(
894
+ "--enable-chunked-prefill",
895
+ **scheduler_kwargs["enable_chunked_prefill"])
896
+ scheduler_group.add_argument(
897
+ "--disable-chunked-mm-input",
898
+ **scheduler_kwargs["disable_chunked_mm_input"])
899
+ scheduler_group.add_argument("--scheduler-cls",
900
+ **scheduler_kwargs["scheduler_cls"])
901
+ scheduler_group.add_argument(
902
+ "--disable-hybrid-kv-cache-manager",
903
+ **scheduler_kwargs["disable_hybrid_kv_cache_manager"])
904
+
905
+ # vLLM arguments
906
+ vllm_kwargs = get_kwargs(VllmConfig)
907
+ vllm_group = parser.add_argument_group(
908
+ title="VllmConfig",
909
+ description=VllmConfig.__doc__,
910
+ )
911
+ vllm_group.add_argument("--kv-transfer-config",
912
+ **vllm_kwargs["kv_transfer_config"])
913
+ vllm_group.add_argument('--kv-events-config',
914
+ **vllm_kwargs["kv_events_config"])
915
+ vllm_group.add_argument("--compilation-config", "-O",
916
+ **vllm_kwargs["compilation_config"])
917
+ vllm_group.add_argument("--additional-config",
918
+ **vllm_kwargs["additional_config"])
919
+
920
+ # Other arguments
921
+ parser.add_argument('--use-v2-block-manager',
922
+ action='store_true',
923
+ default=True,
924
+ deprecated=True,
925
+ help='[DEPRECATED] block manager v1 has been '
926
+ 'removed and SelfAttnBlockSpaceManager (i.e. '
927
+ 'block manager v2) is now the default. '
928
+ 'Setting this flag to True or False'
929
+ ' has no effect on vLLM behavior.')
930
+ parser.add_argument('--disable-log-stats',
931
+ action='store_true',
932
+ help='Disable logging statistics.')
933
+
934
+ return parser
935
+
936
+ @classmethod
937
+ def from_cli_args(cls, args: argparse.Namespace):
938
+ # Get the list of attributes of this dataclass.
939
+ attrs = [attr.name for attr in dataclasses.fields(cls)]
940
+ # Set the attributes from the parsed arguments.
941
+ engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
942
+ return engine_args
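+ # Minimal end-to-end sketch (illustrative only; the model name is just an
+ # example): build the CLI parser, parse a command line, then construct
+ # EngineArgs from the resulting namespace.
+ #
+ #     parser = FlexibleArgumentParser()
+ #     parser = EngineArgs.add_cli_args(parser)
+ #     args = parser.parse_args(["--model", "facebook/opt-125m", "-tp", "2"])
+ #     engine_args = EngineArgs.from_cli_args(args)
+ #     vllm_config = engine_args.create_engine_config()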
943
+
944
+ def create_model_config(self) -> ModelConfig:
945
+ # gguf file needs a specific model loader and doesn't use hf_repo
946
+ if check_gguf_file(self.model):
947
+ self.quantization = self.load_format = "gguf"
948
+
949
+ # NOTE: This is to allow model loading from S3 in CI
950
+ if (not isinstance(self, AsyncEngineArgs) and envs.VLLM_CI_USE_S3
951
+ and self.model in MODELS_ON_S3
952
+ and self.load_format == LoadFormat.AUTO): # noqa: E501
953
+ self.model = f"{MODEL_WEIGHTS_S3_BUCKET}/{self.model}"
954
+ self.load_format = LoadFormat.RUNAI_STREAMER
955
+
956
+ return ModelConfig(
957
+ model=self.model,
958
+ hf_config_path=self.hf_config_path,
959
+ task=self.task,
960
+ tokenizer=self.tokenizer,
961
+ tokenizer_mode=self.tokenizer_mode,
962
+ trust_remote_code=self.trust_remote_code,
963
+ allowed_local_media_path=self.allowed_local_media_path,
964
+ dtype=self.dtype,
965
+ seed=self.seed,
966
+ revision=self.revision,
967
+ code_revision=self.code_revision,
968
+ rope_scaling=self.rope_scaling,
969
+ rope_theta=self.rope_theta,
970
+ hf_token=self.hf_token,
971
+ hf_overrides=self.hf_overrides,
972
+ tokenizer_revision=self.tokenizer_revision,
973
+ max_model_len=self.max_model_len,
974
+ quantization=self.quantization,
975
+ enforce_eager=self.enforce_eager,
976
+ max_seq_len_to_capture=self.max_seq_len_to_capture,
977
+ max_logprobs=self.max_logprobs,
978
+ disable_sliding_window=self.disable_sliding_window,
979
+ disable_cascade_attn=self.disable_cascade_attn,
980
+ skip_tokenizer_init=self.skip_tokenizer_init,
981
+ enable_prompt_embeds=self.enable_prompt_embeds,
982
+ served_model_name=self.served_model_name,
983
+ limit_mm_per_prompt=self.limit_mm_per_prompt,
984
+ media_io_kwargs=self.media_io_kwargs,
985
+ use_async_output_proc=not self.disable_async_output_proc,
986
+ config_format=self.config_format,
987
+ mm_processor_kwargs=self.mm_processor_kwargs,
988
+ disable_mm_preprocessor_cache=self.disable_mm_preprocessor_cache,
989
+ override_neuron_config=self.override_neuron_config,
990
+ override_pooler_config=self.override_pooler_config,
991
+ logits_processor_pattern=self.logits_processor_pattern,
992
+ generation_config=self.generation_config,
993
+ override_generation_config=self.override_generation_config,
994
+ enable_sleep_mode=self.enable_sleep_mode,
995
+ model_impl=self.model_impl,
996
+ override_attention_dtype=self.override_attention_dtype,
997
+ )
998
+
999
+ def create_load_config(self) -> LoadConfig:
1000
+
1001
+ if self.quantization == "bitsandbytes":
1002
+ self.load_format = "bitsandbytes"
1003
+
1004
+ return LoadConfig(
1005
+ load_format=self.load_format,
1006
+ download_dir=self.download_dir,
1007
+ model_loader_extra_config=self.model_loader_extra_config,
1008
+ ignore_patterns=self.ignore_patterns,
1009
+ use_tqdm_on_load=self.use_tqdm_on_load,
1010
+ pt_load_map_location=self.pt_load_map_location,
1011
+ )
1012
+
1013
+ def create_speculative_config(
1014
+ self,
1015
+ target_model_config: ModelConfig,
1016
+ target_parallel_config: ParallelConfig,
1017
+ enable_chunked_prefill: bool,
1018
+ disable_log_stats: bool,
1019
+ ) -> Optional["SpeculativeConfig"]:
1020
+ """Initializes and returns a SpeculativeConfig object based on
1021
+ `speculative_config`.
1022
+
1023
+ This function utilizes `speculative_config` to create a
1024
+ SpeculativeConfig object. The `speculative_config` can either be
1025
+ provided as a JSON string input via CLI arguments or directly as a
1026
+ dictionary from the engine.
1027
+ """
1028
+ if self.speculative_config is None:
1029
+ return None
1030
+
1031
+ # Note(Shangming): These parameters are not obtained from the cli arg
1032
+ # '--speculative-config' and must be passed in when creating the engine
1033
+ # config.
1034
+ self.speculative_config.update({
1035
+ "target_model_config": target_model_config,
1036
+ "target_parallel_config": target_parallel_config,
1037
+ "enable_chunked_prefill": enable_chunked_prefill,
1038
+ "disable_log_stats": disable_log_stats,
1039
+ })
1040
+ speculative_config = SpeculativeConfig.from_dict(
1041
+ self.speculative_config)
1042
+
1043
+ return speculative_config
1044
+
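+ # Hypothetical example of the dict form accepted above (the exact field
+ # names are defined by SpeculativeConfig and should be treated as an
+ # assumption here):
+ #
+ #     EngineArgs(model="facebook/opt-125m",
+ #                speculative_config={"method": "ngram",
+ #                                    "num_speculative_tokens": 4})
+ #
+ # The same dict can be passed on the CLI as a JSON string via
+ # --speculative-config.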
1045
+ def create_engine_config(
1046
+ self,
1047
+ usage_context: Optional[UsageContext] = None,
1048
+ ) -> VllmConfig:
1049
+ """
1050
+ Create the VllmConfig.
1051
+
1052
+ NOTE: for autoselection of V0 vs V1 engine, we need to
1053
+ create the ModelConfig first, since ModelConfig's attrs
1054
+ (e.g. the model arch) are needed to make the decision.
1055
+
1056
+ This function sets VLLM_USE_V1=X if VLLM_USE_V1 is
1057
+ unspecified by the user.
1058
+
1059
+ If VLLM_USE_V1 is specified by the user but the VllmConfig
1060
+ is incompatible, we raise an error.
1061
+ """
1062
+ from vllm.platforms import current_platform
1063
+ current_platform.pre_register_and_update()
1064
+
1065
+ device_config = DeviceConfig(
1066
+ device=cast(Device, current_platform.device_type))
1067
+ model_config = self.create_model_config()
1068
+
1069
+ # * If VLLM_USE_V1 is unset, we enable V1 for "supported features"
1070
+ # and fall back to V0 for experimental or unsupported features.
1071
+ # * If VLLM_USE_V1=1, we enable V1 for supported + experimental
1072
+ # features and raise error for unsupported features.
1073
+ # * If VLLM_USE_V1=0, we disable V1.
1074
+ use_v1 = False
1075
+ try_v1 = envs.VLLM_USE_V1 or not envs.is_set("VLLM_USE_V1")
1076
+ if try_v1 and self._is_v1_supported_oracle(model_config):
1077
+ use_v1 = True
1078
+
1079
+ # If user explicitly set VLLM_USE_V1, sanity check we respect it.
1080
+ if envs.is_set("VLLM_USE_V1"):
1081
+ assert use_v1 == envs.VLLM_USE_V1
1082
+ # Otherwise, set the VLLM_USE_V1 variable globally.
1083
+ else:
1084
+ envs.set_vllm_use_v1(use_v1)
1085
+
1086
+ # Set default arguments for V0 or V1 Engine.
1087
+ if use_v1:
1088
+ self._set_default_args_v1(usage_context, model_config)
1089
+ else:
1090
+ self._set_default_args_v0(model_config)
1091
+
1092
+ assert self.enable_chunked_prefill is not None
1093
+
1094
+ if envs.VLLM_ATTENTION_BACKEND in [STR_DUAL_CHUNK_FLASH_ATTN_VAL]:
1095
+ assert self.enforce_eager, (
1096
+ "Cuda graph is not supported with DualChunkFlashAttention. "
1097
+ "To run the model in eager mode, set 'enforce_eager=True' "
1098
+ "or use '--enforce-eager' in the CLI.")
1099
+ assert current_platform.is_cuda(), (
1100
+ "DualChunkFlashAttention is only supported on CUDA platform.")
1101
+ assert not use_v1, (
1102
+ "DualChunkFlashAttention is not supported on V1 engine. "
1103
+ "To run the model in V0 engine, try set 'VLLM_USE_V1=0'")
1104
+
1105
+ cache_config = CacheConfig(
1106
+ block_size=self.block_size,
1107
+ gpu_memory_utilization=self.gpu_memory_utilization,
1108
+ swap_space=self.swap_space,
1109
+ cache_dtype=self.kv_cache_dtype,
1110
+ is_attention_free=model_config.is_attention_free,
1111
+ num_gpu_blocks_override=self.num_gpu_blocks_override,
1112
+ sliding_window=model_config.get_sliding_window(),
1113
+ enable_prefix_caching=self.enable_prefix_caching,
1114
+ prefix_caching_hash_algo=self.prefix_caching_hash_algo,
1115
+ cpu_offload_gb=self.cpu_offload_gb,
1116
+ calculate_kv_scales=self.calculate_kv_scales,
1117
+ )
1118
+
1119
+ # Get the current placement group if Ray is initialized and
1120
+ # we are in a Ray actor. If so, then the placement group will be
1121
+ # passed to spawned processes.
1122
+ placement_group = None
1123
+ if is_in_ray_actor():
1124
+ import ray
1125
+
1126
+ # This call initializes Ray automatically if it is not initialized,
1127
+ # but we should not do this here.
1128
+ placement_group = ray.util.get_current_placement_group()
1129
+
1130
+ data_parallel_external_lb = self.data_parallel_rank is not None
1131
+ if data_parallel_external_lb:
1132
+ assert self.data_parallel_size_local in (1, None), (
1133
+ "data_parallel_size_local must be 1 when data_parallel_rank "
1134
+ "is set")
1135
+ data_parallel_size_local = 1
1136
+ elif self.data_parallel_size_local is not None:
1137
+ data_parallel_size_local = self.data_parallel_size_local
1138
+ else:
1139
+ # Local DP size defaults to global DP size if not set.
1140
+ data_parallel_size_local = self.data_parallel_size
1141
+
1142
+ # DP address, used in multi-node case for torch distributed group
1143
+ # and ZMQ sockets.
1144
+ if self.data_parallel_address is None:
1145
+ if self.data_parallel_backend == "ray":
1146
+ host_ip = get_ip()
1147
+ logger.info(
1148
+ "Using host IP %s as ray-based data parallel address",
1149
+ host_ip)
1150
+ data_parallel_address = host_ip
1151
+ else:
1152
+ assert self.data_parallel_backend == "mp", (
1153
+ "data_parallel_backend can only be ray or mp, got %s",
1154
+ self.data_parallel_backend)
1155
+ data_parallel_address = ParallelConfig.data_parallel_master_ip
1156
+ else:
1157
+ data_parallel_address = self.data_parallel_address
1158
+
1159
+ # This port is only used when there are remote data parallel engines,
1160
+ # otherwise the local IPC transport is used.
1161
+ data_parallel_rpc_port = self.data_parallel_rpc_port if (
1162
+ self.data_parallel_rpc_port
1163
+ is not None) else ParallelConfig.data_parallel_rpc_port
1164
+
1165
+ parallel_config = ParallelConfig(
1166
+ pipeline_parallel_size=self.pipeline_parallel_size,
1167
+ tensor_parallel_size=self.tensor_parallel_size,
1168
+ data_parallel_size=self.data_parallel_size,
1169
+ data_parallel_rank=self.data_parallel_rank or 0,
1170
+ data_parallel_external_lb=data_parallel_external_lb,
1171
+ data_parallel_size_local=data_parallel_size_local,
1172
+ data_parallel_master_ip=data_parallel_address,
1173
+ data_parallel_rpc_port=data_parallel_rpc_port,
1174
+ data_parallel_backend=self.data_parallel_backend,
1175
+ enable_expert_parallel=self.enable_expert_parallel,
1176
+ enable_eplb=self.enable_eplb,
1177
+ num_redundant_experts=self.num_redundant_experts,
1178
+ eplb_window_size=self.eplb_window_size,
1179
+ eplb_step_interval=self.eplb_step_interval,
1180
+ eplb_log_balancedness=self.eplb_log_balancedness,
1181
+ max_parallel_loading_workers=self.max_parallel_loading_workers,
1182
+ disable_custom_all_reduce=self.disable_custom_all_reduce,
1183
+ ray_workers_use_nsight=self.ray_workers_use_nsight,
1184
+ placement_group=placement_group,
1185
+ distributed_executor_backend=self.distributed_executor_backend,
1186
+ worker_cls=self.worker_cls,
1187
+ worker_extension_cls=self.worker_extension_cls,
1188
+ enable_multimodal_encoder_data_parallel=self.
1189
+ enable_multimodal_encoder_data_parallel,
1190
+ )
1191
+
1192
+ speculative_config = self.create_speculative_config(
1193
+ target_model_config=model_config,
1194
+ target_parallel_config=parallel_config,
1195
+ enable_chunked_prefill=self.enable_chunked_prefill,
1196
+ disable_log_stats=self.disable_log_stats,
1197
+ )
1198
+
1199
+ # Reminder: Please update docs/features/compatibility_matrix.md
+ # if the feature combo becomes valid.
1201
+ if self.num_scheduler_steps > 1:
1202
+ if speculative_config is not None:
1203
+ raise ValueError("Speculative decoding is not supported with "
1204
+ "multi-step (--num-scheduler-steps > 1)")
1205
+ if self.enable_chunked_prefill and self.pipeline_parallel_size > 1:
1206
+ raise ValueError("Multi-Step Chunked-Prefill is not supported "
1207
+ "for pipeline-parallel-size > 1")
1208
+ from vllm.platforms import current_platform
1209
+ if current_platform.is_cpu():
1210
+ logger.warning("Multi-Step (--num-scheduler-steps > 1) is "
1211
+ "currently not supported for CPUs and has been "
1212
+ "disabled.")
1213
+ self.num_scheduler_steps = 1
1214
+
1215
+ # Make sure num_lookahead_slots is set to the higher value, depending on
+ # whether we are using speculative decoding or multi-step scheduling.
1217
+ num_lookahead_slots = max(self.num_lookahead_slots,
1218
+ self.num_scheduler_steps - 1)
1219
+ num_lookahead_slots = num_lookahead_slots \
1220
+ if speculative_config is None \
1221
+ else speculative_config.num_lookahead_slots
1222
+
1223
+ scheduler_config = SchedulerConfig(
1224
+ runner_type=model_config.runner_type,
1225
+ max_num_batched_tokens=self.max_num_batched_tokens,
1226
+ max_num_seqs=self.max_num_seqs,
1227
+ max_model_len=model_config.max_model_len,
1228
+ cuda_graph_sizes=self.cuda_graph_sizes,
1229
+ num_lookahead_slots=num_lookahead_slots,
1230
+ delay_factor=self.scheduler_delay_factor,
1231
+ enable_chunked_prefill=self.enable_chunked_prefill,
1232
+ disable_chunked_mm_input=self.disable_chunked_mm_input,
1233
+ is_multimodal_model=model_config.is_multimodal_model,
1234
+ preemption_mode=self.preemption_mode,
1235
+ num_scheduler_steps=self.num_scheduler_steps,
1236
+ multi_step_stream_outputs=self.multi_step_stream_outputs,
1237
+ send_delta_data=(envs.VLLM_USE_RAY_SPMD_WORKER
1238
+ and parallel_config.use_ray),
1239
+ policy=self.scheduling_policy,
1240
+ scheduler_cls=self.scheduler_cls,
1241
+ max_num_partial_prefills=self.max_num_partial_prefills,
1242
+ max_long_partial_prefills=self.max_long_partial_prefills,
1243
+ long_prefill_token_threshold=self.long_prefill_token_threshold,
1244
+ disable_hybrid_kv_cache_manager=self.
1245
+ disable_hybrid_kv_cache_manager,
1246
+ )
1247
+
1248
+ lora_config = LoRAConfig(
1249
+ bias_enabled=self.enable_lora_bias,
1250
+ max_lora_rank=self.max_lora_rank,
1251
+ max_loras=self.max_loras,
1252
+ fully_sharded_loras=self.fully_sharded_loras,
1253
+ lora_extra_vocab_size=self.lora_extra_vocab_size,
1254
+ long_lora_scaling_factors=self.long_lora_scaling_factors,
1255
+ lora_dtype=self.lora_dtype,
1256
+ max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
1257
+ and self.max_cpu_loras > 0 else None) if self.enable_lora else None
1258
+
1259
+ # bitsandbytes pre-quantized models need a specific model loader
1260
+ if model_config.quantization == "bitsandbytes":
1261
+ self.quantization = self.load_format = "bitsandbytes"
1262
+
1263
+ load_config = self.create_load_config()
1264
+
1265
+ prompt_adapter_config = PromptAdapterConfig(
1266
+ max_prompt_adapters=self.max_prompt_adapters,
1267
+ max_prompt_adapter_token=self.max_prompt_adapter_token) \
1268
+ if self.enable_prompt_adapter else None
1269
+
1270
+ decoding_config = DecodingConfig(
1271
+ backend=self.guided_decoding_backend,
1272
+ disable_fallback=self.guided_decoding_disable_fallback,
1273
+ disable_any_whitespace=self.guided_decoding_disable_any_whitespace,
1274
+ disable_additional_properties=\
1275
+ self.guided_decoding_disable_additional_properties,
1276
+ reasoning_backend=self.reasoning_parser
1277
+ )
1278
+
1279
+ observability_config = ObservabilityConfig(
1280
+ show_hidden_metrics_for_version=self.
1281
+ show_hidden_metrics_for_version,
1282
+ otlp_traces_endpoint=self.otlp_traces_endpoint,
1283
+ collect_detailed_traces=self.collect_detailed_traces,
1284
+ )
1285
+
1286
+ config = VllmConfig(
1287
+ model_config=model_config,
1288
+ cache_config=cache_config,
1289
+ parallel_config=parallel_config,
1290
+ scheduler_config=scheduler_config,
1291
+ device_config=device_config,
1292
+ lora_config=lora_config,
1293
+ speculative_config=speculative_config,
1294
+ load_config=load_config,
1295
+ decoding_config=decoding_config,
1296
+ observability_config=observability_config,
1297
+ prompt_adapter_config=prompt_adapter_config,
1298
+ compilation_config=self.compilation_config,
1299
+ kv_transfer_config=self.kv_transfer_config,
1300
+ kv_events_config=self.kv_events_config,
1301
+ additional_config=self.additional_config,
1302
+ )
1303
+
1304
+ return config
1305
+
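+ # Illustrative note (assumption about typical usage): the V0/V1 selection
+ # above can be pinned from the environment before the config is created, e.g.
+ #
+ #     VLLM_USE_V1=0 vllm serve facebook/opt-125m   # force the V0 engine
+ #     VLLM_USE_V1=1 vllm serve facebook/opt-125m   # force V1; errors if unsupported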
1306
+ def _is_v1_supported_oracle(self, model_config: ModelConfig) -> bool:
1307
+ """Oracle for whether to use V0 or V1 Engine by default."""
1308
+
1309
+ #############################################################
1310
+ # Unsupported Feature Flags on V1.
1311
+
1312
+ if self.load_format == LoadFormat.SHARDED_STATE.value:
1313
+ _raise_or_fallback(
1314
+ feature_name=f"--load_format {self.load_format}",
1315
+ recommend_to_remove=False)
1316
+ return False
1317
+
1318
+ if (self.logits_processor_pattern
1319
+ != EngineArgs.logits_processor_pattern):
1320
+ _raise_or_fallback(feature_name="--logits-processor-pattern",
1321
+ recommend_to_remove=False)
1322
+ return False
1323
+
1324
+ if self.preemption_mode != SchedulerConfig.preemption_mode:
1325
+ _raise_or_fallback(feature_name="--preemption-mode",
1326
+ recommend_to_remove=True)
1327
+ return False
1328
+
1329
+ if (self.disable_async_output_proc
1330
+ != EngineArgs.disable_async_output_proc):
1331
+ _raise_or_fallback(feature_name="--disable-async-output-proc",
1332
+ recommend_to_remove=True)
1333
+ return False
1334
+
1335
+ if self.num_scheduler_steps != SchedulerConfig.num_scheduler_steps:
1336
+ _raise_or_fallback(feature_name="--num-scheduler-steps",
1337
+ recommend_to_remove=True)
1338
+ return False
1339
+
1340
+ if self.scheduler_delay_factor != SchedulerConfig.delay_factor:
1341
+ _raise_or_fallback(feature_name="--scheduler-delay-factor",
1342
+ recommend_to_remove=True)
1343
+ return False
1344
+
1345
+ if self.guided_decoding_backend not in get_args(
1346
+ GuidedDecodingBackendV1):
1347
+ _raise_or_fallback(
1348
+ feature_name=
1349
+ f"--guided-decoding-backend={self.guided_decoding_backend}",
1350
+ recommend_to_remove=False)
1351
+ return False
1352
+
1353
+ # Need at least Ampere for now (FA support required).
1354
+ # Skip this check if we are running on a non-GPU platform,
1355
+ # or if the device capability is not available
1356
+ # (e.g. in a Ray actor without GPUs).
1357
+ from vllm.platforms import current_platform
1358
+ if (current_platform.is_cuda()
1359
+ and current_platform.get_device_capability()
1360
+ and current_platform.get_device_capability().major < 8):
1361
+ _raise_or_fallback(feature_name="Compute Capability < 8.0",
1362
+ recommend_to_remove=False)
1363
+ return False
1364
+
1365
+ # No Fp8 KV cache so far.
1366
+ if self.kv_cache_dtype != "auto":
1367
+ fp8_attention = self.kv_cache_dtype.startswith("fp8")
1368
+ will_use_fa = (
1369
+ current_platform.is_cuda()
1370
+ and not envs.is_set("VLLM_ATTENTION_BACKEND")
1371
+ ) or envs.VLLM_ATTENTION_BACKEND == "FLASH_ATTN_VLLM_V1"
1372
+ supported = False
1373
+ if current_platform.is_rocm():
1374
+ supported = True
1375
+ elif fp8_attention and will_use_fa:
1376
+ from vllm.attention.utils.fa_utils import (
1377
+ flash_attn_supports_fp8)
1378
+ supported = flash_attn_supports_fp8()
1379
+ if not supported:
1380
+ _raise_or_fallback(feature_name="--kv-cache-dtype",
1381
+ recommend_to_remove=False)
1382
+ return False
1383
+
1384
+ # No Prompt Adapter so far.
1385
+ if self.enable_prompt_adapter:
1386
+ _raise_or_fallback(feature_name="--enable-prompt-adapter",
1387
+ recommend_to_remove=False)
1388
+ return False
1389
+
1390
+ # No text embedding inputs so far.
1391
+ if self.enable_prompt_embeds:
1392
+ _raise_or_fallback(feature_name="--enable-prompt-embeds",
1393
+ recommend_to_remove=False)
1394
+ return False
1395
+
1396
+ # No Mamba or Encoder-Decoder so far.
1397
+ if not model_config.is_v1_compatible:
1398
+ _raise_or_fallback(feature_name=model_config.architectures,
1399
+ recommend_to_remove=False)
1400
+ return False
1401
+
1402
+ # V1 mamba models are unoptimized.
1403
+ if model_config.has_inner_state and _warn_or_fallback(
1404
+ feature_name="Mamba"):
1405
+ return False
1406
+
1407
+ # No Concurrent Partial Prefills so far.
1408
+ if (self.max_num_partial_prefills
1409
+ != SchedulerConfig.max_num_partial_prefills
1410
+ or self.max_long_partial_prefills
1411
+ != SchedulerConfig.max_long_partial_prefills):
1412
+ _raise_or_fallback(feature_name="Concurrent Partial Prefill",
1413
+ recommend_to_remove=False)
1414
+ return False
1415
+
1416
+ # No OTLP observability so far.
1417
+ if (self.otlp_traces_endpoint or self.collect_detailed_traces):
1418
+ _raise_or_fallback(feature_name="--otlp-traces-endpoint",
1419
+ recommend_to_remove=False)
1420
+ return False
1421
+
1422
+ # V1 supports N-gram, Medusa, and Eagle speculative decoding.
1423
+ is_ngram_enabled = False
1424
+ is_eagle_enabled = False
1425
+ is_medusa_enabled = False
1426
+ if self.speculative_config is not None:
1427
+ # This is supported but experimental (handled below).
1428
+ speculative_method = self.speculative_config.get("method")
1429
+ if speculative_method:
1430
+ if speculative_method in ("ngram", "[ngram]"):
1431
+ is_ngram_enabled = True
1432
+ elif speculative_method == "medusa":
1433
+ is_medusa_enabled = True
1434
+ elif speculative_method in ("eagle", "eagle3", "deepseek_mtp"):
1435
+ is_eagle_enabled = True
1436
+ else:
1437
+ speculative_model = self.speculative_config.get("model")
1438
+ if speculative_model in ("ngram", "[ngram]"):
1439
+ is_ngram_enabled = True
1440
+ if not (is_ngram_enabled or is_eagle_enabled or is_medusa_enabled):
1441
+ # Other speculative decoding methods are not supported yet.
1442
+ _raise_or_fallback(feature_name="Speculative Decoding",
1443
+ recommend_to_remove=False)
1444
+ return False
1445
+
1446
+ # No XFormers so far.
1447
+ V1_BACKENDS = [
1448
+ "FLASH_ATTN_VLLM_V1",
1449
+ "FLASH_ATTN",
1450
+ "PALLAS",
1451
+ "PALLAS_VLLM_V1",
1452
+ "TRITON_ATTN_VLLM_V1",
1453
+ "TRITON_MLA",
1454
+ "CUTLASS_MLA_VLLM_V1",
1455
+ "FLASHMLA",
1456
+ "FLASHINFER",
1457
+ "FLASHINFER_VLLM_V1",
1458
+ "ROCM_AITER_MLA",
1459
+ "TORCH_SDPA_VLLM_V1",
1460
+ "FLEX_ATTENTION",
1461
+ ]
1462
+ if (envs.is_set("VLLM_ATTENTION_BACKEND")
1463
+ and envs.VLLM_ATTENTION_BACKEND not in V1_BACKENDS):
1464
+ name = f"VLLM_ATTENTION_BACKEND={envs.VLLM_ATTENTION_BACKEND}"
1465
+ _raise_or_fallback(feature_name=name, recommend_to_remove=True)
1466
+ return False
1467
+
1468
+ # Platforms must decide if they can support v1 for this model
1469
+ if not current_platform.supports_v1(model_config=model_config):
1470
+ _raise_or_fallback(
1471
+ feature_name=f"device type={current_platform.device_type}",
1472
+ recommend_to_remove=False)
1473
+ return False
1474
+ #############################################################
1475
+ # Experimental Features - allow users to opt in.
1476
+
1477
+ # Signal handlers require running in the main thread.
1478
+ if (threading.current_thread() != threading.main_thread()
1479
+ and _warn_or_fallback("Engine in background thread")):
1480
+ return False
1481
+
1482
+ if (self.pipeline_parallel_size > 1
1483
+ and self.distributed_executor_backend
1484
+ not in (ParallelConfig.distributed_executor_backend, "ray",
1485
+ "mp", "external_launcher")):
1486
+ name = "Pipeline Parallelism without Ray distributed executor " \
1487
+ "or multiprocessing executor or external launcher"
1488
+ _raise_or_fallback(feature_name=name, recommend_to_remove=False)
1489
+ return False
1490
+
1491
+ # The platform may be supported on V1, but off by default for now.
1492
+ if not current_platform.default_v1( # noqa: SIM103
1493
+ model_config=model_config) and _warn_or_fallback(
1494
+ current_platform.device_name):
1495
+ return False
1496
+
1497
+ if (current_platform.is_cpu()
1498
+ and model_config.get_sliding_window() is not None):
1499
+ _raise_or_fallback(feature_name="sliding window (CPU backend)",
1500
+ recommend_to_remove=False)
1501
+ return False
1502
+
1503
+ #############################################################
1504
+
1505
+ return True
1506
+
1507
+ def _set_default_args_v0(self, model_config: ModelConfig) -> None:
1508
+ """Set Default Arguments for V0 Engine."""
1509
+
1510
+ max_model_len = model_config.max_model_len
1511
+ use_long_context = max_model_len > 32768
1512
+ if self.enable_chunked_prefill is None:
1513
+ # Chunked prefill not supported for Multimodal or MLA in V0.
1514
+ if model_config.is_multimodal_model or model_config.use_mla:
1515
+ self.enable_chunked_prefill = False
1516
+
1517
+ # Enable chunked prefill by default for long context (> 32K)
1518
+ # models to avoid OOM errors in initial memory profiling phase.
1519
+ elif use_long_context:
1520
+ from vllm.platforms import current_platform
1521
+ is_gpu = current_platform.is_cuda()
1522
+ use_sliding_window = (model_config.get_sliding_window()
1523
+ is not None)
1524
+ use_spec_decode = self.speculative_config is not None
1525
+
1526
+ if (is_gpu and not use_sliding_window and not use_spec_decode
1527
+ and not self.enable_lora
1528
+ and not self.enable_prompt_adapter
1529
+ and model_config.runner_type != "pooling"):
1530
+ self.enable_chunked_prefill = True
1531
+ logger.warning(
1532
+ "Chunked prefill is enabled by default for models "
1533
+ "with max_model_len > 32K. Chunked prefill might "
1534
+ "not work with some features or models. If you "
1535
+ "encounter any issues, please disable by launching "
1536
+ "with --enable-chunked-prefill=False.")
1537
+
1538
+ if self.enable_chunked_prefill is None:
1539
+ self.enable_chunked_prefill = False
1540
+
1541
+ if not self.enable_chunked_prefill and use_long_context:
1542
+ logger.warning(
1543
+ "The model has a long context length (%s). This may cause"
1544
+ "OOM during the initial memory profiling phase, or result "
1545
+ "in low performance due to small KV cache size. Consider "
1546
+ "setting --max-model-len to a smaller value.", max_model_len)
1547
+ elif (self.enable_chunked_prefill
1548
+ and model_config.runner_type == "pooling"):
1549
+ msg = "Chunked prefill is not supported for pooling models"
1550
+ raise ValueError(msg)
1551
+
1552
+ # If prefix caching is enabled, validate model support and the hash algo.
1553
+ if self.enable_prefix_caching:
1554
+ # Disable prefix caching for multimodal models for VLLM_V0.
1555
+ if model_config.is_multimodal_model:
1556
+ logger.warning(
1557
+ "--enable-prefix-caching is not supported for multimodal "
1558
+ "models in V0 and has been disabled.")
1559
+ self.enable_prefix_caching = False
1560
+
1561
+ # VLLM_V0 only supports builtin hash algo for prefix caching.
1562
+ if self.prefix_caching_hash_algo == "sha256":
1563
+ raise ValueError(
1564
+ "sha256 is not supported for prefix caching in V0 engine. "
1565
+ "Please use 'builtin'.")
1566
+
1567
+ # Set max_num_seqs to 256 for VLLM_V0.
1568
+ if self.max_num_seqs is None:
1569
+ self.max_num_seqs = 256
1570
+
1571
+ def _set_default_args_v1(self, usage_context: UsageContext,
1572
+ model_config: ModelConfig) -> None:
1573
+ """Set Default Arguments for V1 Engine."""
1574
+
1575
+ # V1 always uses chunked prefill and prefix caching
+ # for non-pooling tasks.
+ # For pooling tasks, the default is False.
1578
+ if model_config.runner_type != "pooling":
1579
+ self.enable_chunked_prefill = True
1580
+ if self.enable_prefix_caching is None:
1581
+ self.enable_prefix_caching = True
1582
+ else:
1583
+
1584
+ pooling_type = model_config.pooler_config.pooling_type
1585
+
1586
+ # TODO: when encoder models are supported we'll have to
1587
+ # check for causal attention here.
1588
+ incremental_prefill_supported = (pooling_type is not None and
1589
+ pooling_type.lower() == "last")
1590
+
1591
+ action = "Enabling" if \
1592
+ incremental_prefill_supported else "Disabling"
1593
+
1594
+ if self.enable_chunked_prefill is None:
1595
+ self.enable_chunked_prefill = incremental_prefill_supported
1596
+ logger.info("(%s) chunked prefill by default", action)
1597
+ if self.enable_prefix_caching is None:
1598
+ self.enable_prefix_caching = incremental_prefill_supported
1599
+ logger.info("(%s) prefix caching by default", action)
1600
+
1601
+ if not self.enable_chunked_prefill:
1602
+ self.max_num_batched_tokens = model_config.max_model_len
1603
+
1604
+ # V1 should use the new scheduler by default.
1605
+ # Swap it only if this arg is set to the original V0 default
1606
+ if self.scheduler_cls == EngineArgs.scheduler_cls:
1607
+ self.scheduler_cls = "vllm.v1.core.sched.scheduler.Scheduler"
1608
+
1609
+ # When there is no user override, set the default values based on the usage
1610
+ # context.
1611
+ # Use different default values for different hardware.
1612
+
1613
+ # Try to query the device name on the current platform. If it fails,
1614
+ # it may be because the platform that imports vLLM is not the same
1615
+ # as the platform that vLLM is running on (e.g. the case of scaling
1616
+ # vLLM with Ray) and has no GPUs. In this case we use the default
1617
+ # values for non-H100/H200 GPUs.
1618
+ from vllm.platforms import current_platform
1619
+ try:
1620
+ device_memory = current_platform.get_device_total_memory()
1621
+ device_name = current_platform.get_device_name().lower()
1622
+ except Exception:
1623
+ # This is only used to set default_max_num_batched_tokens
1624
+ device_memory = 0
1625
+
1626
+ # NOTE(Kuntai): Setting large `max_num_batched_tokens` for A100 reduces
1627
+ # throughput, see PR #17885 for more details.
1628
+ # So here we do an extra device name check to prevent such regression.
1629
+ if device_memory >= 70 * GiB_bytes and "a100" not in device_name:
1630
+ # For GPUs like H100 and MI300x, use larger default values.
1631
+ default_max_num_batched_tokens = {
1632
+ UsageContext.LLM_CLASS: 16384,
1633
+ UsageContext.OPENAI_API_SERVER: 8192,
1634
+ }
1635
+ default_max_num_seqs = {
1636
+ UsageContext.LLM_CLASS: 1024,
1637
+ UsageContext.OPENAI_API_SERVER: 1024,
1638
+ }
1639
+ else:
1640
+ # TODO(woosuk): Tune the default values for other hardware.
1641
+ default_max_num_batched_tokens = {
1642
+ UsageContext.LLM_CLASS: 8192,
1643
+ UsageContext.OPENAI_API_SERVER: 2048,
1644
+ }
1645
+ default_max_num_seqs = {
1646
+ UsageContext.LLM_CLASS: 256,
1647
+ UsageContext.OPENAI_API_SERVER: 256,
1648
+ }
1649
+
1650
+ # TPU-specific default values.
1651
+ if current_platform.is_tpu():
1652
+ default_max_num_batched_tokens_tpu = {
1653
+ UsageContext.LLM_CLASS: {
1654
+ 'V6E': 2048,
1655
+ 'V5E': 1024,
1656
+ 'V5P': 512,
1657
+ },
1658
+ UsageContext.OPENAI_API_SERVER: {
1659
+ 'V6E': 1024,
1660
+ 'V5E': 512,
1661
+ 'V5P': 256,
1662
+ }
1663
+ }
1664
+
1665
+ # CPU-specific default values.
1666
+ if current_platform.is_cpu():
1667
+ default_max_num_batched_tokens = {
1668
+ UsageContext.LLM_CLASS: 4096,
1669
+ UsageContext.OPENAI_API_SERVER: 2048,
1670
+ }
1671
+ default_max_num_seqs = {
1672
+ UsageContext.LLM_CLASS: 128,
1673
+ UsageContext.OPENAI_API_SERVER: 32,
1674
+ }
1675
+
1676
+ use_context_value = usage_context.value if usage_context else None
1677
+ if (self.max_num_batched_tokens is None
1678
+ and usage_context in default_max_num_batched_tokens):
1679
+ if current_platform.is_tpu():
1680
+ chip_name = current_platform.get_device_name()
1681
+ if chip_name in default_max_num_batched_tokens_tpu[
1682
+ usage_context]:
1683
+ self.max_num_batched_tokens = \
1684
+ default_max_num_batched_tokens_tpu[
1685
+ usage_context][chip_name]
1686
+ else:
1687
+ self.max_num_batched_tokens = \
1688
+ default_max_num_batched_tokens[usage_context]
1689
+ else:
1690
+ self.max_num_batched_tokens = default_max_num_batched_tokens[
1691
+ usage_context]
1692
+ logger.debug(
1693
+ "Setting max_num_batched_tokens to %d for %s usage context.",
1694
+ self.max_num_batched_tokens, use_context_value)
1695
+
1696
+ if (self.max_num_seqs is None
1697
+ and usage_context in default_max_num_seqs):
1698
+ self.max_num_seqs = default_max_num_seqs[usage_context]
1699
+
1700
+ logger.debug("Setting max_num_seqs to %d for %s usage context.",
1701
+ self.max_num_seqs, use_context_value)
1702
+
1703
+
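The block above fills max_num_batched_tokens / max_num_seqs only when the user left them unset, using per-UsageContext tables that differ by hardware. A minimal, self-contained sketch of that defaulting pattern (illustrative only, not the vLLM API; the numbers are copied from the CPU branch above):

    from enum import Enum
    from typing import Optional

    class UsageContext(Enum):  # stand-in for vLLM's UsageContext enum
        LLM_CLASS = "LLM_CLASS"
        OPENAI_API_SERVER = "OPENAI_API_SERVER"

    # CPU defaults, copied from the branch above.
    CPU_DEFAULT_MAX_NUM_BATCHED_TOKENS = {
        UsageContext.LLM_CLASS: 4096,
        UsageContext.OPENAI_API_SERVER: 2048,
    }

    def resolve_max_num_batched_tokens(
            user_value: Optional[int],
            usage_context: Optional[UsageContext]) -> Optional[int]:
        # An explicit user value always wins; the table only fills in None.
        if user_value is not None:
            return user_value
        return CPU_DEFAULT_MAX_NUM_BATCHED_TOKENS.get(usage_context)

    assert resolve_max_num_batched_tokens(
        None, UsageContext.OPENAI_API_SERVER) == 2048
    assert resolve_max_num_batched_tokens(
        8192, UsageContext.OPENAI_API_SERVER) == 8192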
+ @dataclass
+ class AsyncEngineArgs(EngineArgs):
+     """Arguments for asynchronous vLLM engine."""
+     disable_log_requests: bool = False
+
+     @staticmethod
+     def add_cli_args(parser: FlexibleArgumentParser,
+                      async_args_only: bool = False) -> FlexibleArgumentParser:
+         # Initialize plugins to update the parser; for example, a plugin may
+         # add a new quantization method to the --quantization argument or a
+         # new device to the --device argument.
+         load_general_plugins()
+         if not async_args_only:
+             parser = EngineArgs.add_cli_args(parser)
+         parser.add_argument('--disable-log-requests',
+                             action='store_true',
+                             help='Disable logging requests.')
+         from vllm.platforms import current_platform
+         current_platform.pre_register_and_update(parser)
+         return parser
+
+
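A minimal usage sketch for the parser built above (the module path vllm.engine.arg_utils and the FlexibleArgumentParser import location are assumptions, not shown in this diff):

    from vllm.engine.arg_utils import AsyncEngineArgs  # assumed module path
    from vllm.utils import FlexibleArgumentParser      # assumed import path

    parser = FlexibleArgumentParser(description="async engine CLI demo")
    parser = AsyncEngineArgs.add_cli_args(parser)

    # --disable-log-requests is the extra async-only flag registered above.
    args = parser.parse_args(["--model", "facebook/opt-125m",
                              "--disable-log-requests"])
    print(args.disable_log_requests)  # True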
+ def _raise_or_fallback(feature_name: str, recommend_to_remove: bool):
+     if envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1:
+         raise NotImplementedError(
+             f"VLLM_USE_V1=1 is not supported with {feature_name}.")
+     msg = f"{feature_name} is not supported by the V1 Engine. "
+     msg += "Falling back to V0. "
+     if recommend_to_remove:
+         msg += f"We recommend removing {feature_name} from your config "
+         msg += "in favor of the V1 Engine."
+     logger.warning(msg)
+
+
+ def _warn_or_fallback(feature_name: str) -> bool:
+     if envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1:
+         logger.warning(
+             "Detected VLLM_USE_V1=1 with %s. Usage should "
+             "be considered experimental. Please report any "
+             "issues on Github.", feature_name)
+         should_exit = False
+     else:
+         logger.info(
+             "%s is experimental on VLLM_USE_V1=1. "
+             "Falling back to V0 Engine.", feature_name)
+         should_exit = True
+     return should_exit
+
+
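Both helpers branch on the same condition: whether the user explicitly set VLLM_USE_V1=1. A standalone sketch of that gating logic using plain os.environ instead of vllm.envs (illustrative only; the helper names below are hypothetical):

    import os

    def v1_explicitly_requested() -> bool:
        # Rough equivalent of envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1.
        return os.environ.get("VLLM_USE_V1") == "1"

    def describe_v0_only_feature(feature_name: str) -> str:
        if v1_explicitly_requested():
            # _raise_or_fallback raises here; _warn_or_fallback only warns
            # and lets V1 continue experimentally.
            return f"{feature_name}: incompatible (or experimental) with V1"
        # Otherwise both helpers let the engine fall back to V0.
        return f"{feature_name}: falling back to the V0 engine"

    print(describe_v0_only_feature("some-v0-only-feature"))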
+ def human_readable_int(value):
+     """Parse human-readable integers like '1k', '2M', etc.
+     Decimal values with decimal (lowercase) multipliers are also supported.
+
+     Examples:
+     - '1k' -> 1,000
+     - '1K' -> 1,024
+     - '25.6k' -> 25,600
+     """
+     value = value.strip()
+     match = re.fullmatch(r'(\d+(?:\.\d+)?)([kKmMgGtT])', value)
+     if match:
+         decimal_multiplier = {
+             'k': 10**3,
+             'm': 10**6,
+             'g': 10**9,
+         }
+         binary_multiplier = {
+             'K': 2**10,
+             'M': 2**20,
+             'G': 2**30,
+         }
+
+         number, suffix = match.groups()
+         if suffix in decimal_multiplier:
+             mult = decimal_multiplier[suffix]
+             return int(float(number) * mult)
+         elif suffix in binary_multiplier:
+             mult = binary_multiplier[suffix]
+             # Do not allow decimals with binary multipliers
+             try:
+                 return int(number) * mult
+             except ValueError as e:
+                 raise argparse.ArgumentTypeError("Decimals are not allowed " \
+                     f"with binary suffixes like {suffix}. Did you mean to use " \
+                     f"{number}{suffix.lower()} instead?") from e
+
+     # Regular plain number. Note that 't'/'T' suffixes match the regex above
+     # but have no entry in either multiplier dict, so such values also land
+     # here and int() raises ValueError on them.
+     return int(value)
+
+
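A few illustrative calls to human_readable_int (the import path vllm.engine.arg_utils is an assumption; it is not shown in this diff):

    import argparse
    from vllm.engine.arg_utils import human_readable_int  # assumed module path

    assert human_readable_int("1k") == 1_000      # lowercase suffix -> decimal
    assert human_readable_int("1K") == 1_024      # uppercase suffix -> binary
    assert human_readable_int("25.6k") == 25_600  # decimals OK with k/m/g
    assert human_readable_int("4096") == 4096     # plain integers pass through

    try:
        human_readable_int("1.5K")  # decimals are rejected with binary suffixes
    except argparse.ArgumentTypeError as exc:
        print(exc)  # suggests '1.5k' instead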
1794
+ # These functions are used by sphinx to build the documentation
1795
+ def _engine_args_parser():
1796
+ return EngineArgs.add_cli_args(FlexibleArgumentParser())
1797
+
1798
+
1799
+ def _async_engine_args_parser():
1800
+ return AsyncEngineArgs.add_cli_args(FlexibleArgumentParser(),
1801
+ async_args_only=True)
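Since both helpers return the fully populated argparse-style parser, the generated CLI reference can also be inspected by hand; a quick sketch (module path assumed, as above):

    from vllm.engine.arg_utils import _engine_args_parser  # assumed module path

    # format_help() is standard argparse; print the first part of the
    # auto-generated engine-argument reference.
    print(_engine_args_parser().format_help()[:500])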