vllm-cpu-amxbf16 0.9.1 (cp312-cp312-manylinux_2_17_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information it contains is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
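
As an aside, the Python, ABI, and platform tags in the wheel name above (cp312-cp312-manylinux_2_17_x86_64) determine where this build can be installed. Below is a minimal sketch of checking those tags against the running interpreter using the third-party packaging library; the PEP 427-normalized filename in the sketch is an assumption reconstructed from the title, not something taken from this diff.

    from packaging.tags import sys_tags
    from packaging.utils import parse_wheel_filename

    # Assumed PEP 427 form of the wheel named in the title above.
    WHEEL = "vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl"

    # parse_wheel_filename splits the distribution name, version, optional
    # build tag, and the set of (python, abi, platform) tags in the filename.
    name, version, _build, tags = parse_wheel_filename(WHEEL)
    print(name, version, sorted(str(t) for t in tags))

    # The wheel is installable on this interpreter only if at least one of
    # its tags is among the tags the interpreter/platform supports.
    supported = set(sys_tags())
    print("compatible here:", any(t in supported for t in tags))

On a CPython 3.12 build running on x86-64 Linux with a glibc of at least 2.17, the final line should print True; on other interpreters or platforms it should print False.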
Files changed (1197)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +53 -0
  3. vllm/_custom_ops.py +1828 -0
  4. vllm/_ipex_ops.py +244 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +115 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +308 -0
  20. vllm/attention/backends/blocksparse_attn.py +461 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
  23. vllm/attention/backends/flash_attn.py +1003 -0
  24. vllm/attention/backends/flashinfer.py +1104 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +313 -0
  27. vllm/attention/backends/ipex_attn.py +398 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1385 -0
  30. vllm/attention/backends/pallas.py +351 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +975 -0
  34. vllm/attention/backends/torch_sdpa.py +703 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +802 -0
  38. vllm/attention/layer.py +468 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +906 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/prefix_prefill.py +902 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  54. vllm/attention/ops/triton_decode_attention.py +674 -0
  55. vllm/attention/ops/triton_flash_attention.py +979 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  57. vllm/attention/ops/triton_unified_attention.py +334 -0
  58. vllm/attention/selector.py +187 -0
  59. vllm/attention/utils/fa_utils.py +55 -0
  60. vllm/beam_search.py +87 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +1185 -0
  63. vllm/benchmarks/endpoint_request_func.py +381 -0
  64. vllm/benchmarks/latency.py +168 -0
  65. vllm/benchmarks/serve.py +1135 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +70 -0
  68. vllm/collect_env.py +820 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +89 -0
  71. vllm/compilation/backends.py +563 -0
  72. vllm/compilation/base_piecewise_backend.py +72 -0
  73. vllm/compilation/collective_fusion.py +127 -0
  74. vllm/compilation/compiler_interface.py +544 -0
  75. vllm/compilation/counter.py +38 -0
  76. vllm/compilation/cuda_piecewise_backend.py +214 -0
  77. vllm/compilation/decorators.py +250 -0
  78. vllm/compilation/fix_functionalization.py +191 -0
  79. vllm/compilation/fusion.py +618 -0
  80. vllm/compilation/fx_utils.py +62 -0
  81. vllm/compilation/inductor_pass.py +115 -0
  82. vllm/compilation/monitor.py +39 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +137 -0
  85. vllm/compilation/pass_manager.py +78 -0
  86. vllm/compilation/sequence_parallelism.py +268 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +67 -0
  89. vllm/compilation/wrapper.py +135 -0
  90. vllm/config.py +4746 -0
  91. vllm/connections.py +174 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +399 -0
  95. vllm/core/block/common.py +371 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  97. vllm/core/block/interfaces.py +319 -0
  98. vllm/core/block/naive_block.py +466 -0
  99. vllm/core/block/prefix_caching_block.py +1135 -0
  100. vllm/core/block/utils.py +28 -0
  101. vllm/core/block_manager.py +521 -0
  102. vllm/core/evictor.py +157 -0
  103. vllm/core/interfaces.py +135 -0
  104. vllm/core/placeholder_block_space_manager.py +100 -0
  105. vllm/core/scheduler.py +2093 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +281 -0
  108. vllm/distributed/__init__.py +6 -0
  109. vllm/distributed/communication_op.py +41 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +264 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +176 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  120. vllm/distributed/device_communicators/pynccl.py +218 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  125. vllm/distributed/kv_events.py +356 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +12 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  152. vllm/distributed/parallel_state.py +1296 -0
  153. vllm/distributed/tpu_distributed_utils.py +177 -0
  154. vllm/distributed/utils.py +536 -0
  155. vllm/engine/__init__.py +0 -0
  156. vllm/engine/arg_utils.py +1708 -0
  157. vllm/engine/async_llm_engine.py +1200 -0
  158. vllm/engine/async_timeout.py +173 -0
  159. vllm/engine/llm_engine.py +2097 -0
  160. vllm/engine/metrics.py +629 -0
  161. vllm/engine/metrics_types.py +94 -0
  162. vllm/engine/multiprocessing/__init__.py +148 -0
  163. vllm/engine/multiprocessing/client.py +681 -0
  164. vllm/engine/multiprocessing/engine.py +460 -0
  165. vllm/engine/output_processor/__init__.py +0 -0
  166. vllm/engine/output_processor/interfaces.py +75 -0
  167. vllm/engine/output_processor/multi_step.py +216 -0
  168. vllm/engine/output_processor/single_step.py +145 -0
  169. vllm/engine/output_processor/stop_checker.py +131 -0
  170. vllm/engine/output_processor/util.py +28 -0
  171. vllm/engine/protocol.py +317 -0
  172. vllm/entrypoints/__init__.py +0 -0
  173. vllm/entrypoints/api_server.py +178 -0
  174. vllm/entrypoints/chat_utils.py +1299 -0
  175. vllm/entrypoints/cli/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  177. vllm/entrypoints/cli/benchmark/base.py +39 -0
  178. vllm/entrypoints/cli/benchmark/latency.py +30 -0
  179. vllm/entrypoints/cli/benchmark/main.py +54 -0
  180. vllm/entrypoints/cli/benchmark/serve.py +30 -0
  181. vllm/entrypoints/cli/benchmark/throughput.py +30 -0
  182. vllm/entrypoints/cli/collect_env.py +35 -0
  183. vllm/entrypoints/cli/main.py +65 -0
  184. vllm/entrypoints/cli/openai.py +205 -0
  185. vllm/entrypoints/cli/run_batch.py +62 -0
  186. vllm/entrypoints/cli/serve.py +328 -0
  187. vllm/entrypoints/cli/types.py +25 -0
  188. vllm/entrypoints/launcher.py +147 -0
  189. vllm/entrypoints/llm.py +1544 -0
  190. vllm/entrypoints/logger.py +50 -0
  191. vllm/entrypoints/openai/__init__.py +0 -0
  192. vllm/entrypoints/openai/api_server.py +1387 -0
  193. vllm/entrypoints/openai/cli_args.py +315 -0
  194. vllm/entrypoints/openai/logits_processors.py +90 -0
  195. vllm/entrypoints/openai/protocol.py +1913 -0
  196. vllm/entrypoints/openai/run_batch.py +463 -0
  197. vllm/entrypoints/openai/serving_chat.py +1221 -0
  198. vllm/entrypoints/openai/serving_classification.py +160 -0
  199. vllm/entrypoints/openai/serving_completion.py +592 -0
  200. vllm/entrypoints/openai/serving_embedding.py +201 -0
  201. vllm/entrypoints/openai/serving_engine.py +986 -0
  202. vllm/entrypoints/openai/serving_models.py +315 -0
  203. vllm/entrypoints/openai/serving_pooling.py +232 -0
  204. vllm/entrypoints/openai/serving_score.py +433 -0
  205. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  206. vllm/entrypoints/openai/serving_transcription.py +424 -0
  207. vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
  208. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  209. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  210. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  211. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  212. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  213. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  214. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  215. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  216. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  217. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  218. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  219. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  220. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  221. vllm/entrypoints/score_utils.py +50 -0
  222. vllm/entrypoints/ssl.py +75 -0
  223. vllm/entrypoints/utils.py +233 -0
  224. vllm/env_override.py +41 -0
  225. vllm/envs.py +944 -0
  226. vllm/executor/__init__.py +0 -0
  227. vllm/executor/executor_base.py +401 -0
  228. vllm/executor/mp_distributed_executor.py +244 -0
  229. vllm/executor/msgspec_utils.py +30 -0
  230. vllm/executor/multiproc_worker_utils.py +313 -0
  231. vllm/executor/ray_distributed_executor.py +701 -0
  232. vllm/executor/ray_utils.py +399 -0
  233. vllm/executor/uniproc_executor.py +139 -0
  234. vllm/forward_context.py +179 -0
  235. vllm/inputs/__init__.py +41 -0
  236. vllm/inputs/data.py +331 -0
  237. vllm/inputs/parse.py +151 -0
  238. vllm/inputs/preprocess.py +909 -0
  239. vllm/inputs/registry.py +237 -0
  240. vllm/jsontree.py +80 -0
  241. vllm/logger.py +212 -0
  242. vllm/logging_utils/__init__.py +8 -0
  243. vllm/logging_utils/dump_input.py +85 -0
  244. vllm/logging_utils/formatter.py +18 -0
  245. vllm/logits_process.py +119 -0
  246. vllm/lora/__init__.py +0 -0
  247. vllm/lora/fully_sharded_layers.py +355 -0
  248. vllm/lora/layers.py +1285 -0
  249. vllm/lora/lora.py +199 -0
  250. vllm/lora/models.py +818 -0
  251. vllm/lora/ops/__init__.py +0 -0
  252. vllm/lora/ops/torch_ops/__init__.py +16 -0
  253. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  254. vllm/lora/ops/triton_ops/__init__.py +12 -0
  255. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  256. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  257. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  258. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  259. vllm/lora/ops/triton_ops/utils.py +120 -0
  260. vllm/lora/ops/xla_ops/__init__.py +7 -0
  261. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  262. vllm/lora/peft_helper.py +136 -0
  263. vllm/lora/punica_wrapper/__init__.py +10 -0
  264. vllm/lora/punica_wrapper/punica_base.py +485 -0
  265. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  266. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  267. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  268. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  269. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  270. vllm/lora/punica_wrapper/utils.py +164 -0
  271. vllm/lora/request.py +99 -0
  272. vllm/lora/resolver.py +85 -0
  273. vllm/lora/utils.py +240 -0
  274. vllm/lora/worker_manager.py +259 -0
  275. vllm/model_executor/__init__.py +16 -0
  276. vllm/model_executor/custom_op.py +152 -0
  277. vllm/model_executor/guided_decoding/__init__.py +181 -0
  278. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  279. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  280. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  281. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  282. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  283. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  284. vllm/model_executor/guided_decoding/utils.py +242 -0
  285. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  286. vllm/model_executor/layers/__init__.py +0 -0
  287. vllm/model_executor/layers/activation.py +369 -0
  288. vllm/model_executor/layers/fused_moe/__init__.py +54 -0
  289. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
  290. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  455. vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
  456. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
  457. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
  458. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
  459. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
  460. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
  461. vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
  462. vllm/model_executor/layers/fused_moe/layer.py +1535 -0
  463. vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
  464. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  465. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  466. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  467. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  468. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
  469. vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
  470. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
  471. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
  472. vllm/model_executor/layers/fused_moe/utils.py +98 -0
  473. vllm/model_executor/layers/layernorm.py +288 -0
  474. vllm/model_executor/layers/lightning_attn.py +652 -0
  475. vllm/model_executor/layers/linear.py +1524 -0
  476. vllm/model_executor/layers/logits_processor.py +197 -0
  477. vllm/model_executor/layers/mamba/__init__.py +0 -0
  478. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  479. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  480. vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
  481. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  482. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  483. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  484. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  485. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  486. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  487. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  488. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  489. vllm/model_executor/layers/pooler.py +350 -0
  490. vllm/model_executor/layers/quantization/__init__.py +157 -0
  491. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  492. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  493. vllm/model_executor/layers/quantization/awq.py +194 -0
  494. vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
  495. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  496. vllm/model_executor/layers/quantization/base_config.py +151 -0
  497. vllm/model_executor/layers/quantization/bitblas.py +461 -0
  498. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  505. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  506. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
  507. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
  508. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  509. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  510. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  511. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  512. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  513. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  514. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  515. vllm/model_executor/layers/quantization/experts_int8.py +196 -0
  516. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  517. vllm/model_executor/layers/quantization/fp8.py +906 -0
  518. vllm/model_executor/layers/quantization/gguf.py +565 -0
  519. vllm/model_executor/layers/quantization/gptq.py +278 -0
  520. vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
  521. vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
  522. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  523. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  524. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  525. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  526. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  527. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  528. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  529. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  530. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  531. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
  532. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  533. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  534. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  535. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  536. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  537. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  538. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  539. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  540. vllm/model_executor/layers/quantization/marlin.py +261 -0
  541. vllm/model_executor/layers/quantization/modelopt.py +737 -0
  542. vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
  543. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  544. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  545. vllm/model_executor/layers/quantization/qqq.py +275 -0
  546. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  547. vllm/model_executor/layers/quantization/quark/quark.py +441 -0
  548. vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
  549. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  550. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  551. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  552. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
  553. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  554. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  555. vllm/model_executor/layers/quantization/schema.py +86 -0
  556. vllm/model_executor/layers/quantization/torchao.py +161 -0
  557. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  558. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  559. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  560. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
  764. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  765. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  766. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  767. vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
  768. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  769. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  770. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  771. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  772. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  773. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  774. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  775. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
  776. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  777. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  778. vllm/model_executor/layers/rejection_sampler.py +406 -0
  779. vllm/model_executor/layers/resampler.py +270 -0
  780. vllm/model_executor/layers/rotary_embedding.py +1862 -0
  781. vllm/model_executor/layers/sampler.py +1204 -0
  782. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  783. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  784. vllm/model_executor/layers/utils.py +95 -0
  785. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  786. vllm/model_executor/model_loader/__init__.py +76 -0
  787. vllm/model_executor/model_loader/base_loader.py +43 -0
  788. vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
  789. vllm/model_executor/model_loader/default_loader.py +282 -0
  790. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  791. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  792. vllm/model_executor/model_loader/neuron.py +476 -0
  793. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  794. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  795. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  796. vllm/model_executor/model_loader/tensorizer.py +600 -0
  797. vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
  798. vllm/model_executor/model_loader/tpu.py +112 -0
  799. vllm/model_executor/model_loader/utils.py +302 -0
  800. vllm/model_executor/model_loader/weight_utils.py +782 -0
  801. vllm/model_executor/models/__init__.py +28 -0
  802. vllm/model_executor/models/adapters.py +248 -0
  803. vllm/model_executor/models/aimv2.py +246 -0
  804. vllm/model_executor/models/arctic.py +559 -0
  805. vllm/model_executor/models/aria.py +657 -0
  806. vllm/model_executor/models/aya_vision.py +466 -0
  807. vllm/model_executor/models/baichuan.py +474 -0
  808. vllm/model_executor/models/bamba.py +543 -0
  809. vllm/model_executor/models/bart.py +938 -0
  810. vllm/model_executor/models/bert.py +523 -0
  811. vllm/model_executor/models/bert_with_rope.py +769 -0
  812. vllm/model_executor/models/blip.py +339 -0
  813. vllm/model_executor/models/blip2.py +718 -0
  814. vllm/model_executor/models/bloom.py +373 -0
  815. vllm/model_executor/models/chameleon.py +1136 -0
  816. vllm/model_executor/models/chatglm.py +478 -0
  817. vllm/model_executor/models/clip.py +407 -0
  818. vllm/model_executor/models/commandr.py +472 -0
  819. vllm/model_executor/models/constant_size_cache.py +137 -0
  820. vllm/model_executor/models/dbrx.py +472 -0
  821. vllm/model_executor/models/deepseek.py +486 -0
  822. vllm/model_executor/models/deepseek_mtp.py +269 -0
  823. vllm/model_executor/models/deepseek_v2.py +843 -0
  824. vllm/model_executor/models/deepseek_vl2.py +648 -0
  825. vllm/model_executor/models/eagle.py +260 -0
  826. vllm/model_executor/models/exaone.py +551 -0
  827. vllm/model_executor/models/fairseq2_llama.py +154 -0
  828. vllm/model_executor/models/falcon.py +510 -0
  829. vllm/model_executor/models/falcon_h1.py +685 -0
  830. vllm/model_executor/models/florence2.py +1103 -0
  831. vllm/model_executor/models/fuyu.py +389 -0
  832. vllm/model_executor/models/gemma.py +425 -0
  833. vllm/model_executor/models/gemma2.py +425 -0
  834. vllm/model_executor/models/gemma3.py +533 -0
  835. vllm/model_executor/models/gemma3_mm.py +709 -0
  836. vllm/model_executor/models/glm.py +23 -0
  837. vllm/model_executor/models/glm4.py +305 -0
  838. vllm/model_executor/models/glm4v.py +648 -0
  839. vllm/model_executor/models/gpt2.py +328 -0
  840. vllm/model_executor/models/gpt_bigcode.py +335 -0
  841. vllm/model_executor/models/gpt_j.py +339 -0
  842. vllm/model_executor/models/gpt_neox.py +332 -0
  843. vllm/model_executor/models/granite.py +493 -0
  844. vllm/model_executor/models/granite_speech.py +779 -0
  845. vllm/model_executor/models/granitemoe.py +437 -0
  846. vllm/model_executor/models/granitemoehybrid.py +586 -0
  847. vllm/model_executor/models/granitemoeshared.py +341 -0
  848. vllm/model_executor/models/gritlm.py +224 -0
  849. vllm/model_executor/models/grok1.py +546 -0
  850. vllm/model_executor/models/h2ovl.py +546 -0
  851. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  852. vllm/model_executor/models/idefics3.py +776 -0
  853. vllm/model_executor/models/interfaces.py +572 -0
  854. vllm/model_executor/models/interfaces_base.py +164 -0
  855. vllm/model_executor/models/intern_vit.py +480 -0
  856. vllm/model_executor/models/internlm2.py +455 -0
  857. vllm/model_executor/models/internlm2_ve.py +147 -0
  858. vllm/model_executor/models/internvl.py +1418 -0
  859. vllm/model_executor/models/jais.py +373 -0
  860. vllm/model_executor/models/jamba.py +592 -0
  861. vllm/model_executor/models/kimi_vl.py +577 -0
  862. vllm/model_executor/models/llama.py +644 -0
  863. vllm/model_executor/models/llama4.py +532 -0
  864. vllm/model_executor/models/llama_eagle.py +165 -0
  865. vllm/model_executor/models/llama_eagle3.py +263 -0
  866. vllm/model_executor/models/llava.py +866 -0
  867. vllm/model_executor/models/llava_next.py +586 -0
  868. vllm/model_executor/models/llava_next_video.py +471 -0
  869. vllm/model_executor/models/llava_onevision.py +956 -0
  870. vllm/model_executor/models/mamba.py +273 -0
  871. vllm/model_executor/models/mamba2.py +308 -0
  872. vllm/model_executor/models/mamba_cache.py +76 -0
  873. vllm/model_executor/models/medusa.py +219 -0
  874. vllm/model_executor/models/mimo.py +192 -0
  875. vllm/model_executor/models/mimo_mtp.py +285 -0
  876. vllm/model_executor/models/minicpm.py +592 -0
  877. vllm/model_executor/models/minicpm3.py +230 -0
  878. vllm/model_executor/models/minicpm_eagle.py +391 -0
  879. vllm/model_executor/models/minicpmo.py +759 -0
  880. vllm/model_executor/models/minicpmv.py +1287 -0
  881. vllm/model_executor/models/minimax_cache.py +36 -0
  882. vllm/model_executor/models/minimax_text_01.py +1301 -0
  883. vllm/model_executor/models/minimax_vl_01.py +364 -0
  884. vllm/model_executor/models/mistral3.py +604 -0
  885. vllm/model_executor/models/mixtral.py +488 -0
  886. vllm/model_executor/models/mixtral_quant.py +453 -0
  887. vllm/model_executor/models/mllama.py +1624 -0
  888. vllm/model_executor/models/mllama4.py +938 -0
  889. vllm/model_executor/models/mlp_speculator.py +206 -0
  890. vllm/model_executor/models/modernbert.py +331 -0
  891. vllm/model_executor/models/module_mapping.py +72 -0
  892. vllm/model_executor/models/molmo.py +1568 -0
  893. vllm/model_executor/models/moonvit.py +630 -0
  894. vllm/model_executor/models/mpt.py +331 -0
  895. vllm/model_executor/models/nemotron.py +508 -0
  896. vllm/model_executor/models/nemotron_h.py +573 -0
  897. vllm/model_executor/models/nemotron_nas.py +484 -0
  898. vllm/model_executor/models/nvlm_d.py +216 -0
  899. vllm/model_executor/models/olmo.py +389 -0
  900. vllm/model_executor/models/olmo2.py +414 -0
  901. vllm/model_executor/models/olmoe.py +468 -0
  902. vllm/model_executor/models/opt.py +412 -0
  903. vllm/model_executor/models/orion.py +349 -0
  904. vllm/model_executor/models/ovis.py +567 -0
  905. vllm/model_executor/models/paligemma.py +398 -0
  906. vllm/model_executor/models/persimmon.py +344 -0
  907. vllm/model_executor/models/phi.py +356 -0
  908. vllm/model_executor/models/phi3.py +19 -0
  909. vllm/model_executor/models/phi3_small.py +465 -0
  910. vllm/model_executor/models/phi3v.py +723 -0
  911. vllm/model_executor/models/phi4mm.py +1246 -0
  912. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  913. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  914. vllm/model_executor/models/phimoe.py +665 -0
  915. vllm/model_executor/models/pixtral.py +1316 -0
  916. vllm/model_executor/models/plamo2.py +738 -0
  917. vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
  918. vllm/model_executor/models/qwen.py +362 -0
  919. vllm/model_executor/models/qwen2.py +497 -0
  920. vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
  921. vllm/model_executor/models/qwen2_5_vl.py +1166 -0
  922. vllm/model_executor/models/qwen2_audio.py +410 -0
  923. vllm/model_executor/models/qwen2_moe.py +540 -0
  924. vllm/model_executor/models/qwen2_rm.py +132 -0
  925. vllm/model_executor/models/qwen2_vl.py +1405 -0
  926. vllm/model_executor/models/qwen3.py +321 -0
  927. vllm/model_executor/models/qwen3_moe.py +535 -0
  928. vllm/model_executor/models/qwen_vl.py +785 -0
  929. vllm/model_executor/models/registry.py +622 -0
  930. vllm/model_executor/models/roberta.py +276 -0
  931. vllm/model_executor/models/siglip.py +524 -0
  932. vllm/model_executor/models/skyworkr1v.py +951 -0
  933. vllm/model_executor/models/smolvlm.py +52 -0
  934. vllm/model_executor/models/solar.py +506 -0
  935. vllm/model_executor/models/stablelm.py +343 -0
  936. vllm/model_executor/models/starcoder2.py +356 -0
  937. vllm/model_executor/models/tarsier.py +643 -0
  938. vllm/model_executor/models/telechat2.py +140 -0
  939. vllm/model_executor/models/teleflm.py +79 -0
  940. vllm/model_executor/models/transformers.py +508 -0
  941. vllm/model_executor/models/ultravox.py +656 -0
  942. vllm/model_executor/models/utils.py +731 -0
  943. vllm/model_executor/models/vision.py +147 -0
  944. vllm/model_executor/models/whisper.py +747 -0
  945. vllm/model_executor/models/zamba2.py +1009 -0
  946. vllm/model_executor/parameter.py +459 -0
  947. vllm/model_executor/pooling_metadata.py +72 -0
  948. vllm/model_executor/sampling_metadata.py +597 -0
  949. vllm/model_executor/utils.py +77 -0
  950. vllm/multimodal/__init__.py +33 -0
  951. vllm/multimodal/audio.py +106 -0
  952. vllm/multimodal/base.py +219 -0
  953. vllm/multimodal/hasher.py +118 -0
  954. vllm/multimodal/image.py +97 -0
  955. vllm/multimodal/inputs.py +876 -0
  956. vllm/multimodal/parse.py +461 -0
  957. vllm/multimodal/processing.py +1895 -0
  958. vllm/multimodal/profiling.py +258 -0
  959. vllm/multimodal/registry.py +331 -0
  960. vllm/multimodal/utils.py +436 -0
  961. vllm/multimodal/video.py +198 -0
  962. vllm/outputs.py +512 -0
  963. vllm/platforms/__init__.py +291 -0
  964. vllm/platforms/cpu.py +266 -0
  965. vllm/platforms/cuda.py +526 -0
  966. vllm/platforms/hpu.py +106 -0
  967. vllm/platforms/interface.py +538 -0
  968. vllm/platforms/neuron.py +150 -0
  969. vllm/platforms/rocm.py +435 -0
  970. vllm/platforms/tpu.py +216 -0
  971. vllm/platforms/xpu.py +156 -0
  972. vllm/plugins/__init__.py +94 -0
  973. vllm/plugins/lora_resolvers/README.md +15 -0
  974. vllm/plugins/lora_resolvers/__init__.py +0 -0
  975. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  976. vllm/pooling_params.py +54 -0
  977. vllm/profiler/__init__.py +0 -0
  978. vllm/profiler/layerwise_profile.py +375 -0
  979. vllm/profiler/utils.py +148 -0
  980. vllm/prompt_adapter/__init__.py +0 -0
  981. vllm/prompt_adapter/layers.py +83 -0
  982. vllm/prompt_adapter/models.py +358 -0
  983. vllm/prompt_adapter/request.py +37 -0
  984. vllm/prompt_adapter/utils.py +98 -0
  985. vllm/prompt_adapter/worker_manager.py +179 -0
  986. vllm/py.typed +2 -0
  987. vllm/reasoning/__init__.py +15 -0
  988. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  989. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  990. vllm/reasoning/granite_reasoning_parser.py +363 -0
  991. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  992. vllm/sampling_params.py +602 -0
  993. vllm/scalar_type.py +347 -0
  994. vllm/scripts.py +15 -0
  995. vllm/sequence.py +1568 -0
  996. vllm/spec_decode/__init__.py +0 -0
  997. vllm/spec_decode/batch_expansion.py +506 -0
  998. vllm/spec_decode/draft_model_runner.py +349 -0
  999. vllm/spec_decode/interfaces.py +99 -0
  1000. vllm/spec_decode/medusa_worker.py +138 -0
  1001. vllm/spec_decode/metrics.py +213 -0
  1002. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1003. vllm/spec_decode/mqa_scorer.py +160 -0
  1004. vllm/spec_decode/multi_step_worker.py +423 -0
  1005. vllm/spec_decode/ngram_worker.py +196 -0
  1006. vllm/spec_decode/proposer_worker_base.py +59 -0
  1007. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1008. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1009. vllm/spec_decode/target_model_runner.py +45 -0
  1010. vllm/spec_decode/top1_proposer.py +275 -0
  1011. vllm/spec_decode/util.py +277 -0
  1012. vllm/test_utils.py +130 -0
  1013. vllm/third_party/__init__.py +0 -0
  1014. vllm/third_party/pynvml.py +6140 -0
  1015. vllm/tracing.py +131 -0
  1016. vllm/transformers_utils/__init__.py +24 -0
  1017. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1018. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1019. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1020. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1021. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1022. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1023. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1024. vllm/transformers_utils/config.py +887 -0
  1025. vllm/transformers_utils/configs/__init__.py +61 -0
  1026. vllm/transformers_utils/configs/arctic.py +207 -0
  1027. vllm/transformers_utils/configs/chatglm.py +72 -0
  1028. vllm/transformers_utils/configs/cohere2.py +195 -0
  1029. vllm/transformers_utils/configs/dbrx.py +280 -0
  1030. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1031. vllm/transformers_utils/configs/eagle.py +85 -0
  1032. vllm/transformers_utils/configs/exaone.py +190 -0
  1033. vllm/transformers_utils/configs/falcon.py +90 -0
  1034. vllm/transformers_utils/configs/h2ovl.py +16 -0
  1035. vllm/transformers_utils/configs/internvl.py +54 -0
  1036. vllm/transformers_utils/configs/jais.py +238 -0
  1037. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1038. vllm/transformers_utils/configs/medusa.py +63 -0
  1039. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1040. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1041. vllm/transformers_utils/configs/mllama.py +31 -0
  1042. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1043. vllm/transformers_utils/configs/moonvit.py +33 -0
  1044. vllm/transformers_utils/configs/mpt.py +180 -0
  1045. vllm/transformers_utils/configs/nemotron.py +205 -0
  1046. vllm/transformers_utils/configs/nemotron_h.py +258 -0
  1047. vllm/transformers_utils/configs/nvlm_d.py +15 -0
  1048. vllm/transformers_utils/configs/ovis.py +184 -0
  1049. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1050. vllm/transformers_utils/configs/solar.py +247 -0
  1051. vllm/transformers_utils/configs/telechat2.py +64 -0
  1052. vllm/transformers_utils/configs/ultravox.py +108 -0
  1053. vllm/transformers_utils/detokenizer.py +168 -0
  1054. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1055. vllm/transformers_utils/processor.py +221 -0
  1056. vllm/transformers_utils/processors/__init__.py +8 -0
  1057. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1058. vllm/transformers_utils/processors/ovis.py +420 -0
  1059. vllm/transformers_utils/s3_utils.py +162 -0
  1060. vllm/transformers_utils/tokenizer.py +302 -0
  1061. vllm/transformers_utils/tokenizer_base.py +149 -0
  1062. vllm/transformers_utils/tokenizer_group.py +120 -0
  1063. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1064. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1065. vllm/transformers_utils/utils.py +99 -0
  1066. vllm/triton_utils/__init__.py +14 -0
  1067. vllm/triton_utils/importing.py +50 -0
  1068. vllm/usage/__init__.py +0 -0
  1069. vllm/usage/usage_lib.py +256 -0
  1070. vllm/utils.py +2910 -0
  1071. vllm/v1/__init__.py +0 -0
  1072. vllm/v1/attention/__init__.py +0 -0
  1073. vllm/v1/attention/backends/__init__.py +0 -0
  1074. vllm/v1/attention/backends/cpu_attn.py +163 -0
  1075. vllm/v1/attention/backends/flash_attn.py +869 -0
  1076. vllm/v1/attention/backends/flashinfer.py +651 -0
  1077. vllm/v1/attention/backends/flex_attention.py +477 -0
  1078. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1079. vllm/v1/attention/backends/mla/common.py +931 -0
  1080. vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
  1081. vllm/v1/attention/backends/mla/flashmla.py +152 -0
  1082. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
  1083. vllm/v1/attention/backends/mla/triton_mla.py +120 -0
  1084. vllm/v1/attention/backends/pallas.py +240 -0
  1085. vllm/v1/attention/backends/triton_attn.py +285 -0
  1086. vllm/v1/attention/backends/utils.py +52 -0
  1087. vllm/v1/core/__init__.py +0 -0
  1088. vllm/v1/core/block_pool.py +349 -0
  1089. vllm/v1/core/encoder_cache_manager.py +150 -0
  1090. vllm/v1/core/kv_cache_coordinator.py +363 -0
  1091. vllm/v1/core/kv_cache_manager.py +392 -0
  1092. vllm/v1/core/kv_cache_utils.py +996 -0
  1093. vllm/v1/core/sched/__init__.py +0 -0
  1094. vllm/v1/core/sched/interface.py +150 -0
  1095. vllm/v1/core/sched/output.py +154 -0
  1096. vllm/v1/core/sched/scheduler.py +1044 -0
  1097. vllm/v1/core/sched/utils.py +23 -0
  1098. vllm/v1/core/single_type_kv_cache_manager.py +403 -0
  1099. vllm/v1/engine/__init__.py +173 -0
  1100. vllm/v1/engine/async_llm.py +558 -0
  1101. vllm/v1/engine/coordinator.py +253 -0
  1102. vllm/v1/engine/core.py +961 -0
  1103. vllm/v1/engine/core_client.py +1129 -0
  1104. vllm/v1/engine/detokenizer.py +261 -0
  1105. vllm/v1/engine/exceptions.py +17 -0
  1106. vllm/v1/engine/llm_engine.py +317 -0
  1107. vllm/v1/engine/logprobs.py +199 -0
  1108. vllm/v1/engine/mm_input_cache.py +91 -0
  1109. vllm/v1/engine/output_processor.py +428 -0
  1110. vllm/v1/engine/parallel_sampling.py +133 -0
  1111. vllm/v1/engine/processor.py +407 -0
  1112. vllm/v1/executor/__init__.py +0 -0
  1113. vllm/v1/executor/abstract.py +113 -0
  1114. vllm/v1/executor/multiproc_executor.py +537 -0
  1115. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1116. vllm/v1/kv_cache_interface.py +194 -0
  1117. vllm/v1/metrics/__init__.py +0 -0
  1118. vllm/v1/metrics/loggers.py +523 -0
  1119. vllm/v1/metrics/prometheus.py +82 -0
  1120. vllm/v1/metrics/ray_wrappers.py +131 -0
  1121. vllm/v1/metrics/reader.py +246 -0
  1122. vllm/v1/metrics/stats.py +239 -0
  1123. vllm/v1/outputs.py +116 -0
  1124. vllm/v1/request.py +193 -0
  1125. vllm/v1/sample/__init__.py +0 -0
  1126. vllm/v1/sample/metadata.py +44 -0
  1127. vllm/v1/sample/ops/__init__.py +0 -0
  1128. vllm/v1/sample/ops/bad_words.py +39 -0
  1129. vllm/v1/sample/ops/penalties.py +59 -0
  1130. vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
  1131. vllm/v1/sample/rejection_sampler.py +631 -0
  1132. vllm/v1/sample/sampler.py +286 -0
  1133. vllm/v1/sample/tpu/__init__.py +0 -0
  1134. vllm/v1/sample/tpu/metadata.py +124 -0
  1135. vllm/v1/sample/tpu/sampler.py +145 -0
  1136. vllm/v1/serial_utils.py +315 -0
  1137. vllm/v1/spec_decode/__init__.py +0 -0
  1138. vllm/v1/spec_decode/eagle.py +432 -0
  1139. vllm/v1/spec_decode/medusa.py +62 -0
  1140. vllm/v1/spec_decode/metadata.py +62 -0
  1141. vllm/v1/spec_decode/metrics.py +178 -0
  1142. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1143. vllm/v1/spec_decode/utils.py +46 -0
  1144. vllm/v1/structured_output/__init__.py +222 -0
  1145. vllm/v1/structured_output/backend_guidance.py +245 -0
  1146. vllm/v1/structured_output/backend_types.py +134 -0
  1147. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1148. vllm/v1/structured_output/request.py +86 -0
  1149. vllm/v1/structured_output/utils.py +175 -0
  1150. vllm/v1/utils.py +743 -0
  1151. vllm/v1/worker/__init__.py +0 -0
  1152. vllm/v1/worker/block_table.py +142 -0
  1153. vllm/v1/worker/cpu_model_runner.py +86 -0
  1154. vllm/v1/worker/cpu_worker.py +152 -0
  1155. vllm/v1/worker/gpu_input_batch.py +681 -0
  1156. vllm/v1/worker/gpu_model_runner.py +2320 -0
  1157. vllm/v1/worker/gpu_worker.py +393 -0
  1158. vllm/v1/worker/lora_model_runner_mixin.py +173 -0
  1159. vllm/v1/worker/tpu_model_runner.py +1673 -0
  1160. vllm/v1/worker/tpu_worker.py +299 -0
  1161. vllm/v1/worker/utils.py +111 -0
  1162. vllm/v1/worker/worker_base.py +65 -0
  1163. vllm/version.py +41 -0
  1164. vllm/vllm_flash_attn/.gitkeep +0 -0
  1165. vllm/worker/__init__.py +0 -0
  1166. vllm/worker/cache_engine.py +145 -0
  1167. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1168. vllm/worker/cpu_model_runner.py +671 -0
  1169. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1170. vllm/worker/cpu_worker.py +450 -0
  1171. vllm/worker/enc_dec_model_runner.py +555 -0
  1172. vllm/worker/hpu_model_runner.py +2320 -0
  1173. vllm/worker/hpu_worker.py +484 -0
  1174. vllm/worker/model_runner.py +2178 -0
  1175. vllm/worker/model_runner_base.py +282 -0
  1176. vllm/worker/multi_step_hpu_worker.py +123 -0
  1177. vllm/worker/multi_step_model_runner.py +911 -0
  1178. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1179. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1180. vllm/worker/multi_step_tpu_worker.py +108 -0
  1181. vllm/worker/multi_step_worker.py +197 -0
  1182. vllm/worker/neuron_model_runner.py +460 -0
  1183. vllm/worker/neuron_worker.py +193 -0
  1184. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1185. vllm/worker/pooling_model_runner.py +211 -0
  1186. vllm/worker/tpu_model_runner.py +909 -0
  1187. vllm/worker/tpu_worker.py +337 -0
  1188. vllm/worker/utils.py +53 -0
  1189. vllm/worker/worker.py +577 -0
  1190. vllm/worker/worker_base.py +646 -0
  1191. vllm/worker/xpu_model_runner.py +606 -0
  1192. vllm/worker/xpu_worker.py +186 -0
  1193. vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
  1194. vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
  1195. vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
  1196. vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
  1197. vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
vllm/config.py ADDED
@@ -0,0 +1,4746 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import ast
5
+ import copy
6
+ import enum
7
+ import hashlib
8
+ import inspect
9
+ import json
10
+ import textwrap
11
+ import uuid
12
+ import warnings
13
+ from collections import Counter
14
+ from contextlib import contextmanager
15
+ from dataclasses import (MISSING, Field, asdict, field, fields, is_dataclass,
16
+ replace)
17
+ from functools import cached_property
18
+ from importlib.util import find_spec
19
+ from pathlib import Path
20
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Literal, Optional,
21
+ Protocol, TypeVar, Union, cast, get_args, get_origin)
22
+
23
+ import regex as re
24
+ import torch
25
+ from pydantic import (ConfigDict, SkipValidation, TypeAdapter, field_validator,
26
+ model_validator)
27
+ from pydantic.dataclasses import dataclass
28
+ from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
29
+ from torch.distributed import ProcessGroup, ReduceOp
30
+ from transformers import PretrainedConfig
31
+ from typing_extensions import deprecated, runtime_checkable
32
+
33
+ import vllm.envs as envs
34
+ from vllm import version
35
+ from vllm.compilation.inductor_pass import CallableInductorPass, InductorPass
36
+ from vllm.logger import init_logger
37
+ from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS,
38
+ QuantizationMethods,
39
+ get_quantization_config)
40
+ from vllm.model_executor.models import ModelRegistry
41
+ from vllm.platforms import current_platform
42
+ from vllm.tracing import is_otel_available, otel_import_error_traceback
43
+ from vllm.transformers_utils.config import (
44
+ ConfigFormat, get_config, get_hf_image_processor_config,
45
+ get_hf_text_config, get_pooling_config,
46
+ get_sentence_transformer_tokenizer_config, is_encoder_decoder,
47
+ try_get_generation_config, try_get_safetensors_metadata,
48
+ try_get_tokenizer_config, uses_mrope)
49
+ from vllm.transformers_utils.s3_utils import S3Model
50
+ from vllm.transformers_utils.utils import is_s3, maybe_model_redirect
51
+ from vllm.utils import (DEFAULT_MAX_NUM_BATCHED_TOKENS,
52
+ MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
53
+ POOLING_MODEL_MAX_NUM_BATCHED_TOKENS, GiB_bytes,
54
+ LayerBlockType, common_broadcastable_dtype,
55
+ cuda_device_count_stateless, get_cpu_memory,
56
+ get_open_port, is_torch_equal_or_newer, random_uuid,
57
+ resolve_obj_by_qualname)
58
+
59
+ if TYPE_CHECKING:
60
+ from _typeshed import DataclassInstance
61
+ from ray.util.placement_group import PlacementGroup
62
+
63
+ from vllm.executor.executor_base import ExecutorBase
64
+ from vllm.model_executor.layers.quantization.base_config import (
65
+ QuantizationConfig)
66
+ from vllm.model_executor.model_loader import BaseModelLoader
67
+ from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
68
+
69
+ ConfigType = type[DataclassInstance]
70
+ else:
71
+ PlacementGroup = Any
72
+ ExecutorBase = Any
73
+ QuantizationConfig = Any
74
+ BaseModelLoader = Any
75
+ TensorizerConfig = Any
76
+ ConfigType = type
77
+
78
+ logger = init_logger(__name__)
79
+
80
+ ConfigT = TypeVar("ConfigT", bound=ConfigType)
81
+
82
+ TaskOption = Literal["auto", "generate", "embedding", "embed", "classify",
83
+ "score", "reward", "transcription"]
84
+
85
+ _ResolvedTask = Literal["generate", "embed", "classify", "score", "reward",
86
+ "draft", "transcription"]
87
+
88
+ RunnerType = Literal["generate", "pooling", "draft", "transcription"]
89
+
90
+ _RUNNER_TASKS: dict[RunnerType, list[_ResolvedTask]] = {
91
+ "generate": ["generate"],
92
+ "pooling": ["embed", "classify", "score", "reward"],
93
+ "draft": ["draft"],
94
+ "transcription": ["transcription"],
95
+ }
96
+
97
+ _TASK_RUNNER: dict[_ResolvedTask, RunnerType] = {
98
+ task: runner
99
+ for runner, tasks in _RUNNER_TASKS.items()
100
+ for task in tasks
101
+ }
102
+
103
+ HfOverrides = Union[dict[str, Any], Callable[[PretrainedConfig],
104
+ PretrainedConfig]]
105
+
106
+
107
+ @runtime_checkable
108
+ class SupportsHash(Protocol):
109
+
110
+ def compute_hash(self) -> str:
111
+ ...
112
+
113
+
114
+ class SupportsMetricsInfo(Protocol):
115
+
116
+ def metrics_info(self) -> dict[str, str]:
117
+ ...
118
+
119
+
120
+ class ModelImpl(str, enum.Enum):
121
+ AUTO = "auto"
122
+ VLLM = "vllm"
123
+ TRANSFORMERS = "transformers"
124
+
125
+
126
+ def get_attr_docs(cls: type[Any]) -> dict[str, str]:
127
+ """
128
+ Get any docstrings placed after attribute assignments in a class body.
129
+
130
+ https://davidism.com/mit-license/
131
+ """
132
+
133
+ def pairwise(iterable):
134
+ """
135
+ Manually implement https://docs.python.org/3/library/itertools.html#itertools.pairwise
136
+
137
+ Can be removed when Python 3.9 support is dropped.
138
+ """
139
+ iterator = iter(iterable)
140
+ a = next(iterator, None)
141
+
142
+ for b in iterator:
143
+ yield a, b
144
+ a = b
145
+
146
+ cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
147
+
148
+ if not isinstance(cls_node, ast.ClassDef):
149
+ raise TypeError("Given object was not a class.")
150
+
151
+ out = {}
152
+
153
+ # Consider each pair of nodes.
154
+ for a, b in pairwise(cls_node.body):
155
+ # Must be an assignment then a constant string.
156
+ if (not isinstance(a, (ast.Assign, ast.AnnAssign))
157
+ or not isinstance(b, ast.Expr)
158
+ or not isinstance(b.value, ast.Constant)
159
+ or not isinstance(b.value.value, str)):
160
+ continue
161
+
162
+ doc = inspect.cleandoc(b.value.value)
163
+
164
+ # An assignment can have multiple targets (a = b = v), but an
165
+ # annotated assignment only has one target.
166
+ targets = a.targets if isinstance(a, ast.Assign) else [a.target]
167
+
168
+ for target in targets:
169
+ # Must be assigning to a plain name.
170
+ if not isinstance(target, ast.Name):
171
+ continue
172
+
173
+ out[target.id] = doc
174
+
175
+ return out
176
+
177
+
178
+ def config(cls: ConfigT) -> ConfigT:
179
+ """
180
+ A decorator that ensures all fields in a dataclass have default values
181
+ and that each field has a docstring.
182
+
183
+ If a `ConfigT` is used as a CLI argument itself, the default value provided
184
+ by `get_kwargs` will be the result parsing a JSON string as the kwargs
185
+ (i.e. `ConfigT(**json.loads(cli_arg))`). However, if a particular `ConfigT`
186
+ requires custom construction from CLI (i.e. `CompilationConfig`), it can
187
+ have a `from_cli` method, which will be called instead.
188
+ """
189
+ if not is_dataclass(cls):
190
+ raise TypeError("The decorated class must be a dataclass.")
191
+ attr_docs = get_attr_docs(cls)
192
+ for f in fields(cls):
193
+ if f.init and f.default is MISSING and f.default_factory is MISSING:
194
+ raise ValueError(
195
+ f"Field '{f.name}' in {cls.__name__} must have a default value."
196
+ )
197
+
198
+ if f.name not in attr_docs:
199
+ raise ValueError(
200
+ f"Field '{f.name}' in {cls.__name__} must have a docstring.")
201
+
202
+ if get_origin(f.type) is Union:
203
+ args = get_args(f.type)
204
+ literal_args = [arg for arg in args if get_origin(arg) is Literal]
205
+ if len(literal_args) > 1:
206
+ raise ValueError(
207
+ f"Field '{f.name}' in {cls.__name__} must use a single "
208
+ "Literal type. Please use 'Literal[Literal1, Literal2]' "
209
+ "instead of 'Union[Literal1, Literal2]'.")
210
+ return cls
211
+
212
+
213
+ def get_field(cls: ConfigType, name: str) -> Field:
214
+ """Get the default factory field of a dataclass by name. Used for getting
215
+ default factory fields in `EngineArgs`."""
216
+ if not is_dataclass(cls):
217
+ raise TypeError("The given class is not a dataclass.")
218
+ cls_fields = {f.name: f for f in fields(cls)}
219
+ if name not in cls_fields:
220
+ raise ValueError(f"Field '{name}' not found in {cls.__name__}.")
221
+ named_field: Field = cls_fields[name]
222
+ if (default_factory := named_field.default_factory) is not MISSING:
223
+ return field(default_factory=default_factory)
224
+ if (default := named_field.default) is not MISSING:
225
+ return field(default=default)
226
+ raise ValueError(
227
+ f"{cls.__name__}.{name} must have a default value or default factory.")
228
+
229
+
230
+ def is_init_field(cls: ConfigType, name: str) -> bool:
231
+ return next(f for f in fields(cls) if f.name == name).init
232
+
233
+
234
+ TokenizerMode = Literal["auto", "slow", "mistral", "custom"]
235
+ ModelDType = Literal["auto", "half", "float16", "bfloat16", "float", "float32"]
236
+
237
+
238
+ @config
239
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
240
+ class ModelConfig:
241
+ """Configuration for the model."""
242
+
243
+ model: str = "facebook/opt-125m"
244
+ """Name or path of the Hugging Face model to use. It is also used as the
245
+ content for `model_name` tag in metrics output when `served_model_name` is
246
+ not specified."""
247
+ task: Literal[TaskOption, Literal["draft"]] = "auto"
248
+ """The task to use the model for. Each vLLM instance only supports one
249
+ task, even if the same model can be used for multiple tasks. When the model
250
+ only supports one task, "auto" can be used to select it; otherwise, you
251
+ must specify explicitly which task to use."""
252
+ tokenizer: SkipValidation[str] = None # type: ignore
253
+ """Name or path of the Hugging Face tokenizer to use. If unspecified, model
254
+ name or path will be used."""
255
+ tokenizer_mode: TokenizerMode = "auto"
256
+ """Tokenizer mode:\n
257
+ - "auto" will use the fast tokenizer if available.\n
258
+ - "slow" will always use the slow tokenizer.\n
259
+ - "mistral" will always use the tokenizer from `mistral_common`.\n
260
+ - "custom" will use --tokenizer to select the preregistered tokenizer."""
261
+ trust_remote_code: bool = False
262
+ """Trust remote code (e.g., from HuggingFace) when downloading the model
263
+ and tokenizer."""
264
+ dtype: Union[ModelDType, torch.dtype] = "auto"
265
+ """Data type for model weights and activations:\n
266
+ - "auto" will use FP16 precision for FP32 and FP16 models, and BF16
267
+ precision for BF16 models.\n
268
+ - "half" for FP16. Recommended for AWQ quantization.\n
269
+ - "float16" is the same as "half".\n
270
+ - "bfloat16" for a balance between precision and range.\n
271
+ - "float" is shorthand for FP32 precision.\n
272
+ - "float32" for FP32 precision."""
273
+ seed: Optional[int] = None
274
+ """Random seed for reproducibility. Initialized to None in V0, but
275
+ initialized to 0 in V1."""
276
+ hf_config_path: Optional[str] = None
277
+ """Name or path of the Hugging Face config to use. If unspecified, model
278
+ name or path will be used."""
279
+ allowed_local_media_path: str = ""
280
+ """Allowing API requests to read local images or videos from directories
281
+ specified by the server file system. This is a security risk. Should only
282
+ be enabled in trusted environments."""
283
+ revision: Optional[str] = None
284
+ """The specific model version to use. It can be a branch name, a tag name,
285
+ or a commit id. If unspecified, will use the default version."""
286
+ code_revision: Optional[str] = None
287
+ """The specific revision to use for the model code on the Hugging Face Hub.
288
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
289
+ use the default version."""
290
+ rope_scaling: dict[str, Any] = field(default_factory=dict)
291
+ """RoPE scaling configuration. For example,
292
+ `{"rope_type":"dynamic","factor":2.0}`."""
293
+ rope_theta: Optional[float] = None
294
+ """RoPE theta. Use with `rope_scaling`. In some cases, changing the RoPE
295
+ theta improves the performance of the scaled model."""
296
+ tokenizer_revision: Optional[str] = None
297
+ """The specific revision to use for the tokenizer on the Hugging Face Hub.
298
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
299
+ use the default version."""
300
+ max_model_len: SkipValidation[int] = None # type: ignore
301
+ """Model context length (prompt and output). If unspecified, will be
302
+ automatically derived from the model config.
303
+
304
+ When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
305
+ format. Examples:\n
306
+ - 1k -> 1000\n
307
+ - 1K -> 1024\n
308
+ - 25.6k -> 25,600"""
309
+ spec_target_max_model_len: Optional[int] = None
310
+ """Specify the maximum length for spec decoding draft models."""
311
+ quantization: SkipValidation[Optional[QuantizationMethods]] = None
312
+ """Method used to quantize the weights. If `None`, we first check the
313
+ `quantization_config` attribute in the model config file. If that is
314
+ `None`, we assume the model weights are not quantized and use `dtype` to
315
+ determine the data type of the weights."""
316
+ enforce_eager: bool = False
317
+ """Whether to always use eager-mode PyTorch. If True, we will disable CUDA
318
+ graph and always execute the model in eager mode. If False, we will use
319
+ CUDA graph and eager execution in hybrid for maximal performance and
320
+ flexibility."""
321
+ max_seq_len_to_capture: int = 8192
322
+ """Maximum sequence len covered by CUDA graphs. When a sequence has context
323
+ length larger than this, we fall back to eager mode. Additionally for
324
+ encoder-decoder models, if the sequence length of the encoder input is
325
+ larger than this, we fall back to the eager mode."""
326
+ max_logprobs: int = 20
327
+ """Maximum number of log probabilities to return when `logprobs` is
328
+ specified in `SamplingParams`. The default value comes the default for the
329
+ OpenAI Chat Completions API."""
330
+ disable_sliding_window: bool = False
331
+ """Whether to disable sliding window. If True, we will disable the sliding
332
+ window functionality of the model, capping to sliding window size. If the
333
+ model does not support sliding window, this argument is ignored."""
334
+ disable_cascade_attn: bool = False
335
+ """Disable cascade attention for V1. While cascade attention does not
336
+ change the mathematical correctness, disabling it could be useful for
337
+ preventing potential numerical issues. Note that even if this is set to
338
+ False, cascade attention will be only used when the heuristic tells that
339
+ it's beneficial."""
340
+ skip_tokenizer_init: bool = False
341
+ """Skip initialization of tokenizer and detokenizer. Expects valid
342
+ `prompt_token_ids` and `None` for prompt from the input. The generated
343
+ output will contain token ids."""
344
+ enable_prompt_embeds: bool = False
345
+ """If `True`, enables passing text embeddings as inputs via the
346
+ `prompt_embeds` key. Note that enabling this will double the time required
347
+ for graph compilation."""
348
+ served_model_name: Optional[Union[str, list[str]]] = None
349
+ """The model name(s) used in the API. If multiple names are provided, the
350
+ server will respond to any of the provided names. The model name in the
351
+ model field of a response will be the first name in this list. If not
352
+ specified, the model name will be the same as the `--model` argument. Noted
353
+ that this name(s) will also be used in `model_name` tag content of
354
+ prometheus metrics, if multiple names provided, metrics tag will take the
355
+ first one."""
356
+ limit_mm_per_prompt: dict[str, int] = field(default_factory=dict)
357
+ """Maximum number of data items per modality per prompt. Only applicable
358
+ for multimodal models."""
359
+ use_async_output_proc: bool = True
360
+ """Whether to use async output processor."""
361
+ config_format: Union[str, ConfigFormat] = ConfigFormat.AUTO.value
362
+ """The format of the model config to load:\n
363
+ - "auto" will try to load the config in hf format if available else it
364
+ will try to load in mistral format.\n
365
+ - "hf" will load the config in hf format.\n
366
+ - "mistral" will load the config in mistral format."""
367
+ hf_token: Optional[Union[bool, str]] = None
368
+ """The token to use as HTTP bearer authorization for remote files . If
369
+ `True`, will use the token generated when running `huggingface-cli login`
370
+ (stored in `~/.huggingface`)."""
371
+ hf_overrides: HfOverrides = field(default_factory=dict)
372
+ """If a dictionary, contains arguments to be forwarded to the Hugging Face
373
+ config. If a callable, it is called to update the HuggingFace config."""
374
+ mm_processor_kwargs: Optional[dict[str, Any]] = None
375
+ """Arguments to be forwarded to the model's processor for multi-modal data,
376
+ e.g., image processor. Overrides for the multi-modal processor obtained
377
+ from `AutoProcessor.from_pretrained`. The available overrides depend on the
378
+ model that is being run. For example, for Phi-3-Vision: `{"num_crops": 4}`.
379
+ """
380
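# --- Editorial example (not part of the packaged file) ----------------------
# A sketch of passing multimodal limits and processor overrides through the
# public `vllm.LLM` entry point, which is assumed to forward both arguments
# to this config. The model name and override keys are illustrative; the
# `{"num_crops": 4}` value mirrors the Phi-3-Vision example in the docstring.
from vllm import LLM

llm = LLM(
    model="microsoft/Phi-3-vision-128k-instruct",
    limit_mm_per_prompt={"image": 2},      # at most two images per prompt
    mm_processor_kwargs={"num_crops": 4},  # forwarded to the HF processor
)
# -----------------------------------------------------------------------------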
+ disable_mm_preprocessor_cache: bool = False
381
+ """If `True`, disable caching of the multi-modal preprocessor/mapper (not
382
+ recommended)."""
383
+ override_neuron_config: dict[str, Any] = field(default_factory=dict)
384
+ """Initialize non-default neuron config or override default neuron config
385
+ that are specific to Neuron devices, this argument will be used to
386
+ configure the neuron config that can not be gathered from the vllm
387
+ arguments. e.g. `{"cast_logits_dtype": "bfloat16"}`."""
388
+ pooler_config: Optional["PoolerConfig"] = field(init=False)
389
+ """Pooler config which controls the behaviour of output pooling in pooling
390
+ models."""
391
+ override_pooler_config: Optional[Union[dict, "PoolerConfig"]] = None
392
+ """Initialize non-default pooling config or override default pooling config
393
+ for the pooling model. e.g. `{"pooling_type": "mean", "normalize": false}`.
394
+ """
395
+ logits_processor_pattern: Optional[str] = None
396
+ """Optional regex pattern specifying valid logits processor qualified names
397
+ that can be passed with the `logits_processors` extra completion argument.
398
+ Defaults to `None`, which allows no processors."""
399
+ generation_config: str = "auto"
400
+ """The folder path to the generation config. Defaults to `"auto"`, the
401
+ generation config will be loaded from model path. If set to `"vllm"`, no
402
+ generation config is loaded, vLLM defaults will be used. If set to a folder
403
+ path, the generation config will be loaded from the specified folder path.
404
+ If `max_new_tokens` is specified in generation config, then it sets a
405
+ server-wide limit on the number of output tokens for all requests."""
406
+ override_generation_config: dict[str, Any] = field(default_factory=dict)
407
+ """Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
408
+ used with `--generation-config auto`, the override parameters will be
409
+ merged with the default config from the model. If used with
410
+ `--generation-config vllm`, only the override parameters are used."""
411
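# --- Editorial example (not part of the packaged file) ----------------------
# A sketch of the `generation_config` / `override_generation_config`
# interaction described above, assuming the public `vllm.LLM` entry point
# forwards both arguments to this config. The model name is illustrative.
from vllm import LLM

# Load the model's own generation_config.json, but force the temperature.
llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct",
          generation_config="auto",
          override_generation_config={"temperature": 0.5})

# Ignore the model's generation config entirely and rely on vLLM defaults.
llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct", generation_config="vllm")
# -----------------------------------------------------------------------------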
+ enable_sleep_mode: bool = False
412
+ """Enable sleep mode for the engine (only cuda platform is supported)."""
413
+ model_impl: Union[str, ModelImpl] = ModelImpl.AUTO.value
414
+ """Which implementation of the model to use:\n
415
+ - "auto" will try to use the vLLM implementation, if it exists, and fall
416
+ back to the Transformers implementation if no vLLM implementation is
417
+ available.\n
418
+ - "vllm" will use the vLLM model implementation.\n
419
+ - "transformers" will use the Transformers model implementation."""
420
+
421
+ def compute_hash(self) -> str:
422
+ """
423
+ WARNING: Whenever a new field is added to this config,
424
+ ensure that it is included in the factors list if
425
+ it affects the computation graph.
426
+
427
+ Provide a hash that uniquely identifies all the configs
428
+ that affect the structure of the computation
429
+ graph from input ids/embeddings to the final hidden states,
430
+ excluding anything before input ids/embeddings and after
431
+ the final hidden states.
432
+ """
433
+ factors: list[Any] = []
434
+ factors.append(self.model)
435
+ factors.append(self.dtype)
436
+ factors.append(self.quantization)
437
+ factors.append(self.revision)
438
+ factors.append(self.code_revision)
439
+ factors.append(self.max_model_len)
440
+ factors.append(self.max_logprobs)
441
+ factors.append(self.disable_sliding_window)
442
+ factors.append(self.trust_remote_code)
443
+ factors.append(self.generation_config)
444
+ factors.append(self.model_impl)
445
+ factors.append(self.override_generation_config)
446
+ factors.append(self.rope_scaling)
447
+ factors.append(self.rope_theta)
448
+ # hf_config can control how the model looks!
449
+ factors.append(self.hf_config.to_json_string())
450
+ str_factors = str(factors)
451
+ assert_hashable(str_factors)
452
+ return hashlib.sha256(str(factors).encode()).hexdigest()
453
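# --- Editorial sketch (not part of the packaged file) -----------------------
# Illustrates the hashing scheme used by `compute_hash` above: the fields
# that affect the computation graph are collected into a plain list,
# stringified, and digested with SHA-256, so changing any factor changes the
# resulting cache key. The factor values below are illustrative.
import hashlib
from typing import Any

factors: list[Any] = ["facebook/opt-125m", "bfloat16", None, 2048]
cache_key = hashlib.sha256(str(factors).encode()).hexdigest()
print(cache_key[:16])  # stable for as long as none of the factors change
# -----------------------------------------------------------------------------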
+
454
+ def __post_init__(self) -> None:
455
+ # Set the default seed to 0 in V1.
456
+ # NOTE(woosuk): In V0, we set the default seed to None because the
457
+ # driver worker shares the same process as the user process, and thus
458
+ # setting a seed affects the user process as well.
459
+ # In V1, we use separate processes for workers (unless
460
+ # VLLM_ENABLE_V1_MULTIPROCESSING=0), so setting a seed here
461
+ # doesn't affect the user process. However, without a consistent seed,
462
+ # different tensor parallel workers would sample different tokens,
463
+ # leading to inconsistent results.
464
+ if envs.VLLM_USE_V1 and self.seed is None:
465
+ self.seed = 0
466
+ if not envs.VLLM_ENABLE_V1_MULTIPROCESSING:
467
+ logger.warning(
468
+ "The global random seed is set to %d. Since "
469
+ "VLLM_ENABLE_V1_MULTIPROCESSING is set to False, this may "
470
+ "affect the random state of the Python process that "
471
+ "launched vLLM.", self.seed)
472
+
473
+ self.model = maybe_model_redirect(self.model)
474
+ # The tokenizer is consistent with the model by default.
475
+ if self.tokenizer is None:
476
+ self.tokenizer = self.model
477
+ if self.tokenizer_revision is None:
478
+ self.tokenizer_revision = self.revision
479
+ self.tokenizer = maybe_model_redirect(self.tokenizer)
480
+
481
+ if isinstance(self.hf_config_path, str):
482
+ self.hf_config_path = maybe_model_redirect(self.hf_config_path)
483
+
484
+ if callable(self.hf_overrides):
485
+ hf_overrides_kw = {}
486
+ hf_overrides_fn = self.hf_overrides
487
+ else:
488
+ hf_overrides_kw = self.hf_overrides
489
+ hf_overrides_fn = None
490
+
491
+ if self.rope_scaling:
492
+ hf_override: dict[str, Any] = {"rope_scaling": self.rope_scaling}
493
+ hf_overrides_kw.update(hf_override)
494
+ hf_overrides_str = json.dumps(hf_overrides_kw)
495
+ msg = (
496
+ "`--rope-scaling` will be removed in a future release. "
497
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
498
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
499
+ if self.rope_theta is not None:
500
+ hf_override = {"rope_theta": self.rope_theta}
501
+ hf_overrides_kw.update(hf_override)
502
+ hf_overrides_str = json.dumps(hf_overrides_kw)
503
+ msg = (
504
+ "`--rope-theta` will be removed in a future release. "
505
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
506
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
507
+
508
+ self.maybe_pull_model_tokenizer_for_s3(self.model, self.tokenizer)
509
+
510
+ if (backend := envs.VLLM_ATTENTION_BACKEND
511
+ ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
512
+ raise ValueError(
513
+ "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
514
+ "module was not found. See "
515
+ "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501
516
+ "for instructions on how to install it.")
517
+
518
+ from vllm.platforms import current_platform
519
+
520
+ if (self.enable_sleep_mode
521
+ and not current_platform.is_sleep_mode_available()):
522
+ raise ValueError(
523
+ "Sleep mode is not supported on current platform.")
524
+
525
+ if isinstance(self.config_format, str):
526
+ self.config_format = ConfigFormat(self.config_format)
527
+
528
+ hf_config = get_config(self.hf_config_path or self.model,
529
+ self.trust_remote_code, self.revision,
530
+ self.code_revision, self.config_format)
531
+
532
+ if hf_overrides_kw:
533
+ logger.info("Overriding HF config with %s", hf_overrides_kw)
534
+ hf_config.update(hf_overrides_kw)
535
+ if hf_overrides_fn:
536
+ logger.info("Overriding HF config with %s", hf_overrides_fn)
537
+ hf_config = hf_overrides_fn(hf_config)
538
+
539
+ self.hf_config = hf_config
540
+
541
+ self.hf_text_config = get_hf_text_config(self.hf_config)
542
+ self.attention_chunk_size = getattr(self.hf_text_config,
543
+ "attention_chunk_size", None)
544
+ self.encoder_config = self._get_encoder_config()
545
+ self.hf_image_processor_config = get_hf_image_processor_config(
546
+ self.model, hf_token=self.hf_token, revision=self.revision)
547
+
548
+ supported_tasks, task = self._resolve_task(self.task)
549
+ self.supported_tasks = supported_tasks
550
+ self.task = task
551
+ if self.task in ("draft", "generate"):
552
+ self.truncation_side = "left"
553
+ else:
554
+ self.truncation_side = "right"
555
+
556
+ self.pooler_config = self._init_pooler_config()
557
+
558
+ self.dtype = _get_and_verify_dtype(
559
+ self.model,
560
+ self.hf_config,
561
+ self.dtype,
562
+ is_pooling_model=self.runner_type == "pooling",
563
+ revision=self.revision,
564
+ )
565
+
566
+ # Workaround for Gemma 2 which uses interleaved sliding window
567
+ # attention, but it's not specified in its config. TODO: remove this
568
+ # when Gemma 2 is fixed in Transformers.
569
+ if self.hf_text_config.model_type == "gemma2":
570
+ self.hf_text_config.sliding_window_pattern = 2
571
+
572
+ sliding_window = getattr(self.hf_text_config, "sliding_window", None)
573
+ sliding_window_pattern = getattr(self.hf_text_config,
574
+ "sliding_window_pattern", None)
575
+ has_interleaved_attention = sliding_window_pattern is not None or (
576
+ isinstance(sliding_window, list))
577
+
578
+ if not self.disable_sliding_window and has_interleaved_attention:
579
+ if (backend :=
580
+ envs.VLLM_ATTENTION_BACKEND) in ("XFORMERS", "FLASHINFER"):
581
+ sliding_window_len_min = get_min_sliding_window(
582
+ self.hf_text_config.sliding_window)
583
+
584
+ logger.warning_once(
585
+ "%s has interleaved attention, which is currently not supported by the %s backend. Disabling sliding window and capping the max length to the sliding window size (%d).", # noqa: E501
586
+ self.hf_text_config.model_type,
587
+ backend,
588
+ sliding_window_len_min,
589
+ )
590
+ self.disable_sliding_window = True
591
+ else:
592
+ # for a model with interleaved attention,
593
+ # the scheduler and the model treat it as full attention
594
+ # (i.e., not dropping any tokens outside the window).
595
+ # Only the attention layer itself is aware of the sliding
596
+ # window and uses the window size to compute the attention.
597
+ self.hf_text_config.interleaved_sliding_window = sliding_window
598
+
599
+ if hasattr(self.hf_text_config, "sliding_window"):
600
+ delattr(self.hf_text_config, "sliding_window")
601
+
602
+ sliding_window = None
603
+
604
+ self.original_max_model_len = self.max_model_len
605
+ self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
606
+ self.served_model_name = get_served_model_name(self.model,
607
+ self.served_model_name)
608
+ self.multimodal_config = self._init_multimodal_config()
609
+ if not self.skip_tokenizer_init:
610
+ self._verify_tokenizer_mode()
611
+
612
+ self.is_attention_free = self._init_attention_free()
613
+ self.is_hybrid = self._init_is_hybrid()
614
+ self.has_noops = self._init_has_noops()
615
+ self.has_inner_state = self._init_has_inner_state()
616
+
617
+ if (not current_platform.is_neuron() and self.override_neuron_config):
618
+ raise ValueError(
619
+ "`override_neuron_config` is only supported on Neuron.")
620
+
621
+ self._verify_quantization()
622
+ self._verify_cuda_graph()
623
+ self._verify_bnb_config()
624
+
625
+ @field_validator("quantization", mode="before")
626
+ @classmethod
627
+ def validate_quantization_before(cls, value: Any) -> Any:
628
+ if isinstance(value, str):
629
+ return value.lower()
630
+ return value
631
+
632
+ @model_validator(mode="after")
633
+ def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
634
+ if not isinstance(self.tokenizer, str):
635
+ raise ValueError("tokenizer must be a string after __post_init__.")
636
+ if not isinstance(self.max_model_len, int):
637
+ raise ValueError(
638
+ "max_model_len must be an integer after __post_init__.")
639
+ return self
640
+
641
+ @property
642
+ def registry(self):
643
+ return ModelRegistry
644
+
645
+ @property
646
+ def architectures(self) -> list[str]:
647
+ return getattr(self.hf_config, "architectures", [])
648
+
649
+ def maybe_pull_model_tokenizer_for_s3(self, model: str,
650
+ tokenizer: str) -> None:
651
+ """Pull model/tokenizer from S3 to temporary directory when needed.
652
+
653
+ Args:
654
+ model: Model name or path
655
+ tokenizer: Tokenizer name or path
656
+ """
657
+ if not (is_s3(model) or is_s3(tokenizer)):
658
+ return
659
+
660
+ if is_s3(model):
661
+ s3_model = S3Model()
662
+ s3_model.pull_files(model,
663
+ allow_pattern=["*.model", "*.py", "*.json"])
664
+ self.model_weights = model
665
+ self.model = s3_model.dir
666
+
667
+ # If tokenizer is same as model, download to same directory
668
+ if model == tokenizer:
669
+ s3_model.pull_files(
670
+ model, ignore_pattern=["*.pt", "*.safetensors", "*.bin"])
671
+ self.tokenizer = s3_model.dir
672
+ return
673
+
674
+ # Only download tokenizer if needed and not already handled
675
+ if is_s3(tokenizer):
676
+ s3_tokenizer = S3Model()
677
+ s3_tokenizer.pull_files(
678
+ model, ignore_pattern=["*.pt", "*.safetensors", "*.bin"])
679
+ self.tokenizer = s3_tokenizer.dir
680
+
681
+ def _init_multimodal_config(self) -> Optional["MultiModalConfig"]:
682
+ if self.registry.is_multimodal_model(self.architectures):
683
+ return MultiModalConfig(
684
+ limit_per_prompt=self.limit_mm_per_prompt,
685
+ mm_processor_kwargs=self.mm_processor_kwargs,
686
+ disable_mm_preprocessor_cache=self.
687
+ disable_mm_preprocessor_cache)
688
+
689
+ if self.limit_mm_per_prompt:
690
+ raise ValueError("`limit_mm_per_prompt` is only supported for "
691
+ "multimodal models.")
692
+ if self.mm_processor_kwargs:
693
+ raise ValueError("`mm_processor_kwargs` is only supported for "
694
+ "multimodal models.")
695
+ if self.disable_mm_preprocessor_cache:
696
+ raise ValueError("`disable_mm_preprocessor_cache` is only "
697
+ "supported for multimodal models.")
698
+
699
+ return None
700
+
701
+ def _get_encoder_config(self):
702
+ return get_sentence_transformer_tokenizer_config(
703
+ self.model, self.revision)
704
+
705
+ def _init_pooler_config(self) -> Optional["PoolerConfig"]:
706
+ if self.runner_type == "pooling":
707
+ if isinstance(self.override_pooler_config, dict):
708
+ self.override_pooler_config = PoolerConfig(
709
+ **self.override_pooler_config)
710
+
711
+ pooler_config = self.override_pooler_config or PoolerConfig()
712
+
713
+ base_config = get_pooling_config(self.model, self.revision)
714
+ if base_config is not None:
715
+ # Only set values that are not overridden by the user
716
+ for k, v in base_config.items():
717
+ if getattr(pooler_config, k) is None:
718
+ setattr(pooler_config, k, v)
719
+
720
+ if self.is_matryoshka:
721
+ if pooler_config.normalize is None:
722
+ pooler_config.normalize = True
723
+ elif not pooler_config.normalize:
724
+ raise ValueError(
725
+ "`normalize` must be enabled (set to True) "
726
+ "for models that are compatible with "
727
+ "Matryoshka Representation.")
728
+
729
+ return pooler_config
730
+
731
+ return None
732
+
733
+ def _init_attention_free(self) -> bool:
734
+ return self.registry.is_attention_free_model(self.architectures)
735
+
736
+ def _init_is_hybrid(self) -> bool:
737
+ return self.registry.is_hybrid_model(self.architectures)
738
+
739
+ def _init_has_noops(self) -> bool:
740
+ architectures = getattr(self.hf_config, "architectures", [])
741
+ return self.registry.is_noops_model(architectures)
742
+
743
+ def _init_has_inner_state(self) -> bool:
744
+ return self.registry.model_has_inner_state(self.architectures)
745
+
746
+ def _verify_tokenizer_mode(self) -> None:
747
+ tokenizer_mode = cast(TokenizerMode, self.tokenizer_mode.lower())
748
+ if tokenizer_mode not in get_args(TokenizerMode):
749
+ raise ValueError(
750
+ f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
751
+ f"one of {get_args(TokenizerMode)}.")
752
+ self.tokenizer_mode = tokenizer_mode
753
+
754
+ def _get_preferred_task(
755
+ self,
756
+ architectures: list[str],
757
+ supported_tasks: set[_ResolvedTask],
758
+ ) -> Optional[_ResolvedTask]:
759
+ model_id = self.model
760
+ if get_pooling_config(model_id, self.revision):
761
+ return "embed"
762
+ if self.registry.is_cross_encoder_model(architectures):
763
+ return "score"
764
+ if self.registry.is_transcription_model(architectures):
765
+ return "transcription"
766
+
767
+ suffix_to_preferred_task: list[tuple[str, _ResolvedTask]] = [
768
+ # Other models follow this pattern
769
+ ("ForCausalLM", "generate"),
770
+ ("ForConditionalGeneration", "generate"),
771
+ ("ForSequenceClassification", "classify"),
772
+ ("ChatModel", "generate"),
773
+ ("LMHeadModel", "generate"),
774
+ ("EmbeddingModel", "embed"),
775
+ ("RewardModel", "reward"),
776
+ ]
777
+ _, arch = self.registry.inspect_model_cls(architectures)
778
+
779
+ for suffix, pref_task in suffix_to_preferred_task:
780
+ if arch.endswith(suffix) and pref_task in supported_tasks:
781
+ return pref_task
782
+
783
+ return None
784
+
785
+ def _resolve_task(
786
+ self,
787
+ task_option: Literal[TaskOption, Literal["draft"]],
788
+ ) -> tuple[set[_ResolvedTask], _ResolvedTask]:
789
+ if task_option == "draft":
790
+ return {"draft"}, "draft"
791
+
792
+ registry = self.registry
793
+ architectures = self.architectures
794
+
795
+ runner_support: dict[RunnerType, bool] = {
796
+ # NOTE: Listed from highest to lowest priority,
797
+ # in case the model supports multiple of them
798
+ "transcription": registry.is_transcription_model(architectures),
799
+ "generate": registry.is_text_generation_model(architectures),
800
+ "pooling": registry.is_pooling_model(architectures),
801
+ }
802
+ supported_runner_types_lst: list[RunnerType] = [
803
+ runner_type
804
+ for runner_type, is_supported in runner_support.items()
805
+ if is_supported
806
+ ]
807
+
808
+ supported_tasks_lst: list[_ResolvedTask] = [
809
+ task for runner_type in supported_runner_types_lst
810
+ for task in _RUNNER_TASKS[runner_type]
811
+ ]
812
+ supported_tasks = set(supported_tasks_lst)
813
+
814
+ if task_option == "auto":
815
+ selected_task = next(iter(supported_tasks_lst))
816
+
817
+ if len(supported_tasks_lst) > 1:
818
+ preferred_task = self._get_preferred_task(
819
+ architectures, supported_tasks)
820
+ if preferred_task is not None:
821
+ selected_task = preferred_task
822
+
823
+ logger.info(
824
+ "This model supports multiple tasks: %s. "
825
+ "Defaulting to '%s'.", supported_tasks, selected_task)
826
+ else:
827
+ # Aliases
828
+ if task_option == "embedding":
829
+ msg = ("The 'embedding' task has been renamed to "
830
+ "'embed', please use the new name. The old name "
831
+ "will be removed in v1.0.")
832
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
833
+
834
+ task_option = "embed"
835
+
836
+ if task_option not in supported_tasks:
837
+ msg = (
838
+ f"This model does not support the '{task_option}' task. "
839
+ f"Supported tasks: {supported_tasks}")
840
+ raise ValueError(msg)
841
+
842
+ selected_task = task_option
843
+
844
+ return supported_tasks, selected_task
845
+
846
+ def _parse_quant_hf_config(self):
847
+ quant_cfg = getattr(self.hf_config, "quantization_config", None)
848
+ if quant_cfg is None:
849
+ # compressed-tensors uses a "compression_config" key
850
+ quant_cfg = getattr(self.hf_config, "compression_config", None)
851
+ return quant_cfg
852
+
853
+ def _verify_quantization(self) -> None:
854
+ supported_quantization = QUANTIZATION_METHODS
855
+ optimized_quantization_methods = [
856
+ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin",
857
+ "awq_marlin", "fbgemm_fp8", "compressed-tensors", "experts_int8",
858
+ "quark", "modelopt_fp4", "bitblas", "gptq_bitblas"
859
+ ]
860
+ if self.quantization is not None:
861
+ self.quantization = cast(QuantizationMethods, self.quantization)
862
+
863
+ # Parse quantization method from the HF model config, if available.
864
+ quant_cfg = self._parse_quant_hf_config()
865
+
866
+ if quant_cfg is not None:
867
+ quant_method = quant_cfg.get("quant_method", "").lower()
868
+ quant_method = quant_method.replace("compressed_tensors",
869
+ "compressed-tensors")
870
+ quant_cfg["quant_method"] = quant_method
871
+
872
+ # Quantization methods which are overrides (i.e. they have a
873
+ # `override_quantization_method` method) must be checked in order
874
+ # of preference (this is particularly important for GPTQ).
875
+ overrides = [
876
+ "marlin",
877
+ "bitblas",
878
+ "gptq_marlin_24",
879
+ "gptq_marlin",
880
+ "gptq_bitblas",
881
+ "awq_marlin",
882
+ "ipex",
883
+ "moe_wna16",
884
+ ]
885
+ quantization_methods = [
886
+ q for q in supported_quantization if q not in overrides
887
+ ]
888
+ # Any custom overrides will be in quantization_methods, so we place
889
+ # them at the start of the list so that custom overrides take
890
+ # preference over the built-in ones.
891
+ quantization_methods = quantization_methods + overrides
892
+
893
+ # Detect which checkpoint it is
894
+ for name in quantization_methods:
895
+ method = get_quantization_config(name)
896
+ quantization_override = method.override_quantization_method(
897
+ quant_cfg, self.quantization)
898
+ if quantization_override is not None:
899
+ # Raise error if the override is not custom (custom would
900
+ # be in QUANTIZATION_METHODS but not QuantizationMethods)
901
+ # and hasn't been added to the overrides list.
902
+ if (name in get_args(QuantizationMethods)
903
+ and name not in overrides):
904
+ raise ValueError(
905
+ f"Quantization method {name} is an override but "
906
+ "is has not been added to the `overrides` list "
907
+ "above. This is necessary to ensure that the "
908
+ "overrides are checked in order of preference.")
909
+ quant_method = quantization_override
910
+ self.quantization = quantization_override
911
+ break
912
+
913
+ # Verify quantization configurations.
914
+ if self.quantization is None:
915
+ self.quantization = quant_method
916
+ elif self.quantization != quant_method:
917
+ raise ValueError(
918
+ "Quantization method specified in the model config "
919
+ f"({quant_method}) does not match the quantization "
920
+ f"method specified in the `quantization` argument "
921
+ f"({self.quantization}).")
922
+
923
+ if self.quantization is not None:
924
+ if self.quantization not in supported_quantization:
925
+ raise ValueError(
926
+ f"Unknown quantization method: {self.quantization}. Must "
927
+ f"be one of {supported_quantization}.")
928
+ from vllm.platforms import current_platform
929
+ current_platform.verify_quantization(self.quantization)
930
+ if self.quantization not in optimized_quantization_methods:
931
+ logger.warning(
932
+ "%s quantization is not fully "
933
+ "optimized yet. The speed can be slower than "
934
+ "non-quantized models.", self.quantization)
935
+
936
+ def _verify_cuda_graph(self) -> None:
937
+ self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
938
+ self.max_model_len)
939
+ # CUDAGraph capture not supported for enc-dec models and mllama on ROCm
940
+ ROCM_UNSUPPORTED_MODELS = ['mllama']
941
+ unsupported_rocm = (self.hf_config.model_type
942
+ in ROCM_UNSUPPORTED_MODELS
943
+ or self.is_encoder_decoder)
944
+
945
+ if (unsupported_rocm and not self.enforce_eager
946
+ and current_platform.is_rocm()):
947
+ logger.warning(
948
+ "CUDA graph is not supported for %s on ROCm yet, fallback "
949
+ "to eager mode.", self.hf_config.model_type)
950
+ self.enforce_eager = True
951
+
952
+ def _verify_bnb_config(self) -> None:
953
+ """
954
+ The current version of bitsandbytes (0.45.3) with 8-bit models does not
955
+ yet support CUDA graph.
956
+ # TODO: Remove this when bitsandbytes supports it.
957
+ """
958
+ is_bitsandbytes = self.quantization == "bitsandbytes"
959
+ has_quantization_config = (getattr(self.hf_config,
960
+ "quantization_config", None)
961
+ is not None)
962
+ is_8bit = (self.hf_config.quantization_config.get(
963
+ "load_in_8bit", False) if has_quantization_config else False)
964
+ if all([
965
+ is_bitsandbytes,
966
+ has_quantization_config,
967
+ is_8bit,
968
+ not self.enforce_eager,
969
+ ]):
970
+ logger.warning(
971
+ "CUDA graph is not supported on BitsAndBytes 8bit yet, "
972
+ "fallback to the eager mode.")
973
+
974
+ self.enforce_eager = True
975
+
976
+ def _verify_with_expert_parallelism(self) -> None:
977
+ num_expert_names = [
978
+ "moe_num_experts", # Dbrx
979
+ "num_experts", # Jamba
980
+ "n_routed_experts", # DeepSeek
981
+ "num_local_experts", # Mixtral
982
+ ]
983
+ num_experts = 0
984
+ for name in num_expert_names:
985
+ num_experts = getattr(self.hf_text_config, name, 0)
986
+ if num_experts > 0:
987
+ break
988
+ if num_experts < 1:
989
+ raise ValueError(
990
+ "Number of experts in the model must be greater than 0 "
991
+ "when expert parallelism is enabled.")
992
+
993
+ def verify_dual_chunk_attention_config(
994
+ self,
995
+ load_config: "LoadConfig",
996
+ ) -> None:
997
+ if hasattr(self.hf_config, "dual_chunk_attention_config"):
998
+ # Try loading the sparse attention config
999
+ from vllm.model_executor.model_loader.weight_utils import (
1000
+ get_sparse_attention_config)
1001
+ sparse_attn_config = get_sparse_attention_config(self, load_config)
1002
+ if sparse_attn_config:
1003
+ self.hf_config.dual_chunk_attention_config[
1004
+ "sparse_attention_config"] = sparse_attn_config
1005
+ if "sparse_attention_enabled" not in \
1006
+ self.hf_config.dual_chunk_attention_config:
1007
+ self.hf_config.dual_chunk_attention_config[
1008
+ "sparse_attention_enabled"] = True
1009
+
1010
+ def verify_async_output_proc(self, parallel_config, speculative_config,
1011
+ device_config) -> None:
1012
+ if not self.use_async_output_proc:
1013
+ # Nothing to check
1014
+ return
1015
+
1016
+ if parallel_config.pipeline_parallel_size > 1:
1017
+ self.use_async_output_proc = False
1018
+ return
1019
+
1020
+ # Reminder: Please update docs/features/compatibility_matrix.md
1021
+ # if the feature combo becomes valid.
1022
+ from vllm.platforms import current_platform
1023
+ if not current_platform.is_async_output_supported(self.enforce_eager):
1024
+ self.use_async_output_proc = False
1025
+ return
1026
+
1027
+ if envs.VLLM_USE_RAY_SPMD_WORKER:
1028
+ self.use_async_output_proc = False
1029
+ return
1030
+
1031
+ # Async postprocessor is not necessary for pooling models
1032
+ # since there is no token generation
1033
+ if self.runner_type == "pooling":
1034
+ self.use_async_output_proc = False
1035
+
1036
+ # Reminder: Please update docs/features/compatibility_matrix.md
1037
+ # if the feature combo becomes valid.
1038
+ if speculative_config:
1039
+ self.use_async_output_proc = False
1040
+
1041
+ def verify_with_parallel_config(
1042
+ self,
1043
+ parallel_config: "ParallelConfig",
1044
+ ) -> None:
1045
+
1046
+ if parallel_config.distributed_executor_backend == "external_launcher":
1047
+ assert self.seed is not None, (
1048
+ "Seed must be set when using external launcher backend to "
1049
+ "make sure sampling results are the same across workers.")
1050
+
1051
+ total_num_attention_heads = getattr(self.hf_text_config,
1052
+ "num_attention_heads", 0)
1053
+ tensor_parallel_size = parallel_config.tensor_parallel_size
1054
+ if total_num_attention_heads % tensor_parallel_size != 0:
1055
+ raise ValueError(
1056
+ f"Total number of attention heads ({total_num_attention_heads})"
1057
+ " must be divisible by tensor parallel size "
1058
+ f"({tensor_parallel_size}).")
1059
+
1060
+ if parallel_config.enable_expert_parallel:
1061
+ self._verify_with_expert_parallelism()
1062
+
1063
+ pipeline_parallel_size = parallel_config.pipeline_parallel_size
1064
+ if pipeline_parallel_size > 1:
1065
+ if not self.registry.is_pp_supported_model(self.architectures):
1066
+ raise NotImplementedError(
1067
+ "Pipeline parallelism is not supported for this model. "
1068
+ "Supported models implement the `SupportsPP` interface.")
1069
+
1070
+ if self.use_async_output_proc:
1071
+ self.use_async_output_proc = False
1072
+
1073
+ def get_hf_config_sliding_window(
1074
+ self) -> Union[Optional[int], list[Optional[int]]]:
1075
+ """Get the sliding window size, or None if disabled."""
1076
+
1077
+ # Some models, like Qwen2 and Qwen1.5, use `use_sliding_window` in
1078
+ # addition to sliding window size. We check if that field is present
1079
+ # and if it's False, return None.
1080
+ if (hasattr(self.hf_text_config, "use_sliding_window")
1081
+ and not self.hf_text_config.use_sliding_window):
1082
+ return None
1083
+ return getattr(self.hf_text_config, "sliding_window", None)
1084
+
1085
+ def get_sliding_window(self) -> Optional[Union[int, list[Optional[int]]]]:
1086
+ """Get the sliding window size, or None if disabled.
1087
+ """
1088
+ # If user disables sliding window, return None.
1089
+ if self.disable_sliding_window:
1090
+ return None
1091
+ # Otherwise get the value from the hf config.
1092
+ return self.get_hf_config_sliding_window()
1093
+
1094
+ def get_vocab_size(self) -> int:
1095
+ return self.hf_text_config.vocab_size
1096
+
1097
+ def get_hidden_size(self) -> int:
1098
+ return self.hf_text_config.hidden_size
1099
+
1100
+ @property
1101
+ def is_deepseek_mla(self) -> bool:
1102
+ if not hasattr(self.hf_text_config, "model_type"):
1103
+ return False
1104
+ elif self.hf_text_config.model_type in \
1105
+ ('deepseek_v2', 'deepseek_v3', 'deepseek_mtp'):
1106
+ return self.hf_text_config.kv_lora_rank is not None
1107
+ elif self.hf_text_config.model_type == 'eagle':
1108
+ # if the model is an EAGLE module, check for the
1109
+ # underlying architecture
1110
+ return self.hf_text_config.model.model_type in \
1111
+ ('deepseek_v2', 'deepseek_v3') \
1112
+ and self.hf_text_config.kv_lora_rank is not None
1113
+ return False
1114
+
1115
+ def get_head_size(self) -> int:
1116
+ # TODO remove hard code
1117
+ if self.is_deepseek_mla:
1118
+ qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
1119
+ 0)
1120
+ if self.use_mla:
1121
+ return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
1122
+ else:
1123
+ qk_nope_head_dim = getattr(self.hf_text_config,
1124
+ "qk_nope_head_dim", 0)
1125
+ if qk_rope_head_dim and qk_nope_head_dim:
1126
+ return qk_rope_head_dim + qk_nope_head_dim
1127
+
1128
+ if hasattr(self.hf_text_config,
1129
+ "model_type") and (self.hf_text_config.model_type
1130
+ == "zamba2"):
1131
+ return self.hf_text_config.attention_head_dim
1132
+
1133
+ if self.is_attention_free:
1134
+ return 0
1135
+
1136
+ # NOTE: Some configs may set head_dim=None in the config
1137
+ if getattr(self.hf_text_config, "head_dim", None) is not None:
1138
+ return self.hf_text_config.head_dim
1139
+
1140
+ # FIXME(woosuk): This may not be true for all models.
1141
+ return (self.hf_text_config.hidden_size //
1142
+ self.hf_text_config.num_attention_heads)
1143
+
1144
+ def get_total_num_kv_heads(self) -> int:
1145
+ """Returns the total number of KV heads."""
1146
+ # For GPTBigCode & Falcon:
1147
+ # NOTE: for falcon, when new_decoder_architecture is True, the
1148
+ # multi_query flag is ignored and we use n_head_kv for the number of
1149
+ # KV heads.
1150
+ falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
1151
+ new_decoder_arch_falcon = (
1152
+ self.hf_config.model_type in falcon_model_types
1153
+ and getattr(self.hf_config, "new_decoder_architecture", False))
1154
+ if not new_decoder_arch_falcon and getattr(self.hf_text_config,
1155
+ "multi_query", False):
1156
+ # Multi-query attention, only one KV head.
1157
+ # Currently, tensor parallelism is not supported in this case.
1158
+ return 1
1159
+
1160
+ # For DBRX and MPT
1161
+ if self.hf_config.model_type == "mpt":
1162
+ if "kv_n_heads" in self.hf_config.attn_config:
1163
+ return self.hf_config.attn_config["kv_n_heads"]
1164
+ return self.hf_config.num_attention_heads
1165
+ if self.hf_config.model_type == "dbrx":
1166
+ return getattr(self.hf_config.attn_config, "kv_n_heads",
1167
+ self.hf_config.num_attention_heads)
1168
+
1169
+ if self.hf_config.model_type == "nemotron-nas":
1170
+ for block in self.hf_config.block_configs:
1171
+ if not block.attention.no_op:
1172
+ return self.hf_config.num_attention_heads \
1173
+ // block.attention.n_heads_in_group
1174
+
1175
+ raise RuntimeError("Couldn't determine number of kv heads")
1176
+
1177
+ if self.is_attention_free:
1178
+ return 0
1179
+
1180
+ attributes = [
1181
+ # For Falcon:
1182
+ "n_head_kv",
1183
+ "num_kv_heads",
1184
+ # For LLaMA-2:
1185
+ "num_key_value_heads",
1186
+ # For ChatGLM:
1187
+ "multi_query_group_num",
1188
+ ]
1189
+ for attr in attributes:
1190
+ num_kv_heads = getattr(self.hf_text_config, attr, None)
1191
+ if num_kv_heads is not None:
1192
+ return num_kv_heads
1193
+
1194
+ # For non-grouped-query attention models, the number of KV heads is
1195
+ # equal to the number of attention heads.
1196
+ return self.hf_text_config.num_attention_heads
1197
+
1198
+ def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
1199
+ """Returns the number of KV heads per GPU."""
1200
+ if self.use_mla:
1201
+ # When using MLA during decode it becomes MQA
1202
+ return 1
1203
+
1204
+ total_num_kv_heads = self.get_total_num_kv_heads()
1205
+ # If tensor parallelism is used, we divide the number of KV heads by
1206
+ # the tensor parallel size. We will replicate the KV heads in the
1207
+ # case where the number of KV heads is smaller than the tensor
1208
+ # parallel size so each GPU has at least one KV head.
1209
+ return max(1,
1210
+ total_num_kv_heads // parallel_config.tensor_parallel_size)
1211
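# --- Editorial sketch (not part of the packaged file) -----------------------
# The replication rule used in `get_num_kv_heads` above: when the model has
# fewer KV heads than the tensor parallel size, every rank still gets at
# least one (replicated) KV head.
def kv_heads_per_rank(total_num_kv_heads: int, tensor_parallel_size: int) -> int:
    return max(1, total_num_kv_heads // tensor_parallel_size)

assert kv_heads_per_rank(8, 4) == 2  # evenly sharded across ranks
assert kv_heads_per_rank(2, 8) == 1  # fewer heads than ranks: replicate
# -----------------------------------------------------------------------------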
+
1212
+ def get_num_attention_heads(self,
1213
+ parallel_config: "ParallelConfig") -> int:
1214
+ num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
1215
+ return num_heads // parallel_config.tensor_parallel_size
1216
+
1217
+ def get_layers_start_end_indices(
1218
+ self, parallel_config: "ParallelConfig") -> tuple[int, int]:
1219
+ from vllm.distributed.utils import get_pp_indices
1220
+ if (self.hf_text_config.model_type == "deepseek_mtp"
1221
+ or self.hf_config.model_type == "mimo_mtp"):
1222
+ total_num_hidden_layers = getattr(self.hf_text_config,
1223
+ "num_nextn_predict_layers", 0)
1224
+ else:
1225
+ total_num_hidden_layers = getattr(self.hf_text_config,
1226
+ "num_hidden_layers", 0)
1227
+ # the layout order is: DP x PP x TP
1228
+ pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
1229
+ ) % parallel_config.pipeline_parallel_size
1230
+ pp_size = parallel_config.pipeline_parallel_size
1231
+ start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
1232
+ return start, end
1233
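# --- Editorial sketch (not part of the packaged file) -----------------------
# The rank layout used in `get_layers_start_end_indices` above: with the
# DP x PP x TP ordering, the pipeline-parallel rank of a worker is recovered
# from its global rank as (rank // tp_size) % pp_size.
def pp_rank_of(rank: int, tensor_parallel_size: int,
               pipeline_parallel_size: int) -> int:
    return (rank // tensor_parallel_size) % pipeline_parallel_size

# 2 pipeline stages x 4 tensor-parallel ranks: global ranks 0-3 belong to
# stage 0 and global ranks 4-7 belong to stage 1.
assert [pp_rank_of(r, 4, 2) for r in range(8)] == [0, 0, 0, 0, 1, 1, 1, 1]
# -----------------------------------------------------------------------------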
+
1234
+ def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
1235
+ start, end = self.get_layers_start_end_indices(parallel_config)
1236
+ return end - start
1237
+
1238
+ def get_num_layers_by_block_type(
1239
+ self,
1240
+ parallel_config: "ParallelConfig",
1241
+ block_type: LayerBlockType = LayerBlockType.attention,
1242
+ ) -> int:
1243
+ # This function relies on 'layers_block_type' in hf_config,
1244
+ # for models without this attribute we need workarounds like the ones below
1245
+ attn_block_type = block_type == LayerBlockType.attention
1246
+ is_transformer = not self.is_hybrid and \
1247
+ not self.has_noops and \
1248
+ not self.is_attention_free
1249
+ start, end = self.get_layers_start_end_indices(parallel_config)
1250
+
1251
+ if is_transformer:
1252
+ # Handle the basic case first
1253
+ return end - start if attn_block_type else 0
1254
+ elif self.is_attention_free:
1255
+ # Attention free
1256
+ # Note that this code assumes there
1257
+ # is only one type of attention-free block type.
1258
+ return 0 if attn_block_type else end - start
1259
+ elif self.has_noops:
1260
+ block_configs = self.hf_config.block_configs
1261
+ return sum(not bc.attention.no_op
1262
+ for bc in block_configs[start:end])
1263
+ else:
1264
+ # Hybrid model Jamba
1265
+ layers_block_type_value = getattr(self.hf_config,
1266
+ "layers_block_type", None)
1267
+ if layers_block_type_value is not None:
1268
+ if hasattr(self.hf_text_config,
1269
+ "model_type") and (self.hf_text_config.model_type
1270
+ == "zamba2"):
1271
+ if attn_block_type:
1272
+ return sum(t == "hybrid"
1273
+ for t in layers_block_type_value[start:end])
1274
+ else:
1275
+ return self.get_num_layers(parallel_config)
1276
+ return sum(t == block_type.value
1277
+ for t in layers_block_type_value[start:end])
1278
+
1279
+ # Hybrid model Minimax
1280
+ attn_type_list = getattr(self.hf_config, "attn_type_list", None)
1281
+ if attn_type_list:
1282
+ return sum(t == 1 for t in attn_type_list[start:end])
1283
+
1284
+ if layers_block_type_value is None and attn_type_list is None:
1285
+ raise ValueError(
1286
+ "The model is an hybrid without a"
1287
+ "layers_block_type or an attn_type_list in the hf_config,"
1288
+ "cannot determine the num of "
1289
+ f"{block_type.value} layers")
1290
+
1291
+ return sum(t == 1 for t in attn_type_list[start:end])
1292
+
1293
+ def get_multimodal_config(self) -> "MultiModalConfig":
1294
+ """
1295
+ Get the multimodal configuration of the model.
1296
+
1297
+ Raises:
1298
+ ValueError: If the model is not multimodal.
1299
+ """
1300
+ if self.multimodal_config is None:
1301
+ raise ValueError("The model is not multimodal.")
1302
+
1303
+ return self.multimodal_config
1304
+
1305
+ def try_get_generation_config(self) -> dict[str, Any]:
1306
+ if self.generation_config in ("auto", "vllm"):
1307
+ config = try_get_generation_config(
1308
+ self.hf_config_path or self.model,
1309
+ trust_remote_code=self.trust_remote_code,
1310
+ revision=self.revision,
1311
+ )
1312
+ else:
1313
+ config = try_get_generation_config(
1314
+ self.generation_config,
1315
+ trust_remote_code=self.trust_remote_code,
1316
+ )
1317
+
1318
+ if config is None:
1319
+ return {}
1320
+
1321
+ return config.to_diff_dict()
1322
+
1323
+ def get_diff_sampling_param(self) -> dict[str, Any]:
1324
+ """
1325
+ This method returns a dictionary containing the parameters
1326
+ that differ from the default sampling parameters. If
1327
+ `generation_config` is `"vllm"`, an empty dictionary is returned.
1328
+
1329
+ Returns:
1330
+ dict[str, Any]: A dictionary with the differing sampling
1331
+ parameters; if `generation_config` is `"vllm"`, an empty dictionary.
1332
+ """
1333
+ if self.generation_config == "vllm":
1334
+ config = {}
1335
+ else:
1336
+ config = self.try_get_generation_config()
1337
+
1338
+ # Overriding with given generation config
1339
+ config.update(self.override_generation_config)
1340
+
1341
+ available_params = [
1342
+ "repetition_penalty",
1343
+ "temperature",
1344
+ "top_k",
1345
+ "top_p",
1346
+ "min_p",
1347
+ "max_new_tokens",
1348
+ ]
1349
+ if any(p in config for p in available_params):
1350
+ diff_sampling_param = {
1351
+ p: config.get(p)
1352
+ for p in available_params if config.get(p) is not None
1353
+ }
1354
+ # Huggingface definition of max_new_tokens is equivalent
1355
+ # to vLLM's max_tokens
1356
+ if "max_new_tokens" in diff_sampling_param:
1357
+ diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
1358
+ "max_new_tokens")
1359
+ else:
1360
+ diff_sampling_param = {}
1361
+
1362
+ if diff_sampling_param:
1363
+ logger.warning_once(
1364
+ "Default sampling parameters have been overridden by the "
1365
+ "model's Hugging Face generation config recommended from the "
1366
+ "model creator. If this is not intended, please relaunch "
1367
+ "vLLM instance with `--generation-config vllm`.")
1368
+ return diff_sampling_param
1369
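# --- Editorial sketch (not part of the packaged file) -----------------------
# A simplified stand-alone version of the renaming performed in
# `get_diff_sampling_param` above: non-None sampling overrides are kept and
# Hugging Face's `max_new_tokens` is surfaced to vLLM callers as `max_tokens`.
def rename_max_new_tokens(config: dict) -> dict:
    diff = {k: v for k, v in config.items() if v is not None}
    if "max_new_tokens" in diff:
        diff["max_tokens"] = diff.pop("max_new_tokens")
    return diff

assert rename_max_new_tokens({"temperature": 0.7, "max_new_tokens": 256}) == {
    "temperature": 0.7, "max_tokens": 256}
# -----------------------------------------------------------------------------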
+
1370
+ @property
1371
+ def is_encoder_decoder(self) -> bool:
1372
+ """Extract the HF encoder/decoder model flag."""
1373
+ """
1374
+ For Mllama, VLLM overrides HF's is_encoder_decoder flag and sets it to
1375
+ True to enable cross-attention
1376
+ Neuron needs all multimodal data to be in the decoder and does not
1377
+ need to explicitly enable cross-attention
1378
+ """
1379
+ if (current_platform.is_neuron()
1380
+ and self.hf_config.model_type == "mllama"):
1381
+ return False
1382
+
1383
+ return is_encoder_decoder(self.hf_config)
1384
+
1385
+ @property
1386
+ def uses_mrope(self) -> bool:
1387
+ return uses_mrope(self.hf_config)
1388
+
1389
+ @property
1390
+ def is_multimodal_model(self) -> bool:
1391
+ return self.multimodal_config is not None
1392
+
1393
+ @property
1394
+ def is_cross_encoder(self) -> bool:
1395
+ return self.registry.is_cross_encoder_model(self.architectures)
1396
+
1397
+ @property
1398
+ def use_mla(self) -> bool:
1399
+ return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
1400
+
1401
+ @property
1402
+ def supported_runner_types(self) -> set[RunnerType]:
1403
+ return {_TASK_RUNNER[task] for task in self.supported_tasks}
1404
+
1405
+ @property
1406
+ def runner_type(self) -> RunnerType:
1407
+ return _TASK_RUNNER[cast(_ResolvedTask, self.task)]
1408
+
1409
+ @property
1410
+ def is_v1_compatible(self) -> bool:
1411
+ architectures = getattr(self.hf_config, "architectures", [])
1412
+ return ModelRegistry.is_v1_compatible(architectures)
1413
+
1414
+ @property
1415
+ def is_matryoshka(self) -> bool:
1416
+ return (hasattr(self.hf_config, "matryoshka_dimensions")
1417
+ or getattr(self.hf_config, "is_matryoshka", False))
1418
+
1419
+ @property
1420
+ def matryoshka_dimensions(self):
1421
+ return getattr(self.hf_config, "matryoshka_dimensions", None)
1422
+
1423
+ def get_and_verify_max_len(self, max_model_len: int):
1424
+ max_model_len = _get_and_verify_max_len(
1425
+ hf_config=self.hf_text_config,
1426
+ max_model_len=max_model_len,
1427
+ disable_sliding_window=self.disable_sliding_window,
1428
+ sliding_window_len=self.get_hf_config_sliding_window(),
1429
+ spec_target_max_model_len=self.spec_target_max_model_len,
1430
+ encoder_config=self.encoder_config)
1431
+
1432
+ tokenizer_config = try_get_tokenizer_config(
1433
+ self.tokenizer,
1434
+ trust_remote_code=self.trust_remote_code,
1435
+ revision=self.tokenizer_revision)
1436
+
1437
+ if tokenizer_config is None:
1438
+ return max_model_len
1439
+
1440
+ model_max_length = tokenizer_config.get("model_max_length",
1441
+ max_model_len)
1442
+ max_model_len = min(max_model_len, model_max_length)
1443
+ return max_model_len
1444
+
1445
+
1446
+ BlockSize = Literal[1, 8, 16, 32, 64, 128]
1447
+ CacheDType = Literal["auto", "fp8", "fp8_e4m3", "fp8_e5m2"]
1448
+ PrefixCachingHashAlgo = Literal["builtin", "sha256"]
1449
+
1450
+
1451
+ @config
1452
+ @dataclass
1453
+ class CacheConfig:
1454
+ """Configuration for the KV cache."""
1455
+
1456
+ block_size: SkipValidation[BlockSize] = None # type: ignore
1457
+ """Size of a contiguous cache block in number of tokens. This is ignored on
1458
+ neuron devices and set to `--max-model-len`. On CUDA devices, only block
1459
+ sizes up to 32 are supported. On HPU devices, block size defaults to 128.
1460
+
1461
+ This config has no static default. If left unspecified by the user, it will
1462
+ be set in `Platform.check_and_update_configs()` based on the current
1463
+ platform."""
1464
+ gpu_memory_utilization: float = 0.9
1465
+ """The fraction of GPU memory to be used for the model executor, which can
1466
+ range from 0 to 1. For example, a value of 0.5 would imply 50% GPU memory
1467
+ utilization. If unspecified, will use the default value of 0.9. This is a
1468
+ per-instance limit, and only applies to the current vLLM instance. It does
1469
+ not matter if you have another vLLM instance running on the same GPU. For
1470
+ example, if you have two vLLM instances running on the same GPU, you can
1471
+ set the GPU memory utilization to 0.5 for each instance."""
1472
+ swap_space: float = 4
1473
+ """Size of the CPU swap space per GPU (in GiB)."""
1474
+ cache_dtype: CacheDType = "auto"
1475
+ """Data type for kv cache storage. If "auto", will use model data type.
1476
+ CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. ROCm (AMD GPU) supports
1477
+ fp8 (=fp8_e4m3)."""
1478
+ is_attention_free: bool = False
1479
+ """Whether the model is attention-free. This is primarily set in
1480
+ `ModelConfig` and that value should be manually duplicated here."""
1481
+ num_gpu_blocks_override: Optional[int] = None
1482
+ """Number of GPU blocks to use. This overrides the profiled `num_gpu_blocks`
1483
+ if specified. Does nothing if `None`. Used for testing preemption."""
1484
+ sliding_window: Optional[int] = None
1485
+ """Sliding window size for the KV cache. This is primarily set in
1486
+ `ModelConfig` and that value should be manually duplicated here."""
1487
+ enable_prefix_caching: Optional[bool] = None
1488
+ """Whether to enable prefix caching. Disabled by default for V0. Enabled by
1489
+ default for V1."""
1490
+ prefix_caching_hash_algo: PrefixCachingHashAlgo = "builtin"
1491
+ """Set the hash algorithm for prefix caching:\n
1492
+ - "builtin" is Python's built-in hash.\n
1493
+ - "sha256" is collision resistant but with certain overheads."""
1494
+ cpu_offload_gb: float = 0
1495
+ """The space in GiB to offload to CPU, per GPU. Default is 0, which means
1496
+ no offloading. Intuitively, this argument can be seen as a virtual way to
1497
+ increase the GPU memory size. For example, if you have one 24 GB GPU and
1498
+ set this to 10, virtually you can think of it as a 34 GB GPU. Then you can
1499
+ load a 13B model with BF16 weight, which requires at least 26GB GPU memory.
1500
+ Note that this requires fast CPU-GPU interconnect, as part of the model is
1501
+ loaded from CPU memory to GPU memory on the fly in each model forward pass.
1502
+ """
1503
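# --- Editorial example (not part of the packaged file) ----------------------
# A sketch of the memory-related knobs documented above, assuming the public
# `vllm.LLM` entry point forwards them to this config. All values and the
# model name are illustrative.
from vllm import LLM

llm = LLM(
    model="Qwen/Qwen2.5-0.5B-Instruct",
    gpu_memory_utilization=0.5,  # leave half of the GPU for another instance
    swap_space=4,                # 4 GiB of CPU swap space per GPU
    cpu_offload_gb=10,           # treat ~10 GiB of CPU RAM as extra weight room
)
# -----------------------------------------------------------------------------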
+ calculate_kv_scales: bool = False
1504
+ """This enables dynamic calculation of `k_scale` and `v_scale` when
1505
+ kv_cache_dtype is fp8. If `False`, the scales will be loaded from the model
1506
+ checkpoint if available. Otherwise, the scales will default to 1.0."""
1507
+
1508
+ # Will be set after profiling.
1509
+ num_gpu_blocks: Optional[int] = field(default=None, init=False)
1510
+ """The number of blocks to allocate for GPU memory."""
1511
+ num_cpu_blocks: Optional[int] = field(default=None, init=False)
1512
+ """The number of blocks to allocate for CPU memory."""
1513
+
1514
+ def compute_hash(self) -> str:
1515
+ """
1516
+ WARNING: Whenever a new field is added to this config,
1517
+ ensure that it is included in the factors list if
1518
+ it affects the computation graph.
1519
+
1520
+ Provide a hash that uniquely identifies all the configs
1521
+ that affect the structure of the computation
1522
+ graph from input ids/embeddings to the final hidden states,
1523
+ excluding anything before input ids/embeddings and after
1524
+ the final hidden states.
1525
+ """
1526
+ factors: list[Any] = []
1527
+ factors.append(self.cache_dtype)
1528
+ # `cpu_offload_gb` does not use `torch.compile` yet.
1529
+ hash_str = hashlib.md5(str(factors).encode(),
1530
+ usedforsecurity=False).hexdigest()
1531
+ return hash_str
1532
+
1533
+ def __post_init__(self) -> None:
1534
+ self.swap_space_bytes = self.swap_space * GiB_bytes
1535
+
1536
+ self._verify_args()
1537
+ self._verify_cache_dtype()
1538
+ self._verify_prefix_caching()
1539
+
1540
+ def metrics_info(self):
1541
+ # convert cache_config to dict(key: str, value: str) for prometheus
1542
+ # metrics info
1543
+ return {key: str(value) for key, value in self.__dict__.items()}
1544
+
1545
+ def _verify_args(self) -> None:
1546
+ if self.cpu_offload_gb < 0:
1547
+ raise ValueError("CPU offload space must be non-negative"
1548
+ f", but got {self.cpu_offload_gb}")
1549
+
1550
+ if self.gpu_memory_utilization > 1.0:
1551
+ raise ValueError(
1552
+ "GPU memory utilization must be less than 1.0. Got "
1553
+ f"{self.gpu_memory_utilization}.")
1554
+
1555
+ def _verify_cache_dtype(self) -> None:
1556
+ if self.cache_dtype == "auto":
1557
+ pass
1558
+ elif self.cache_dtype in get_args(CacheDType):
1559
+ logger.info(
1560
+ "Using fp8 data type to store kv cache. It reduces the GPU "
1561
+ "memory footprint and boosts the performance. "
1562
+ "Meanwhile, it may cause accuracy drop without a proper "
1563
+ "scaling factor")
1564
+ else:
1565
+ raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")
1566
+
1567
+ def _verify_prefix_caching(self) -> None:
1568
+ if not self.enable_prefix_caching:
1569
+ return
1570
+
1571
+ if self.sliding_window is not None and not envs.VLLM_USE_V1:
1572
+ raise NotImplementedError(
1573
+ "Prefix caching is not supported with sliding window. "
1574
+ "Run with --disable-sliding-window to use prefix caching.")
1575
+
1576
+ if (self.enable_prefix_caching and self.prefix_caching_hash_algo
1577
+ not in get_args(PrefixCachingHashAlgo)):
1578
+ raise ValueError(
1579
+ "Unknown prefix caching hash algorithm: "
1580
+ f"{self.prefix_caching_hash_algo}. Must be one of "
1581
+ f"{get_args(PrefixCachingHashAlgo)}.")
1582
+
1583
+ def verify_with_parallel_config(
1584
+ self,
1585
+ parallel_config: "ParallelConfig",
1586
+ ) -> None:
1587
+ total_cpu_memory = get_cpu_memory()
1588
+ # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
1589
+ # group are in the same node. However, the GPUs may span multiple nodes.
1590
+ num_gpus_per_node = parallel_config.tensor_parallel_size
1591
+ cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node
1592
+
1593
+ msg = (f"{cpu_memory_usage / GiB_bytes:.2f} GiB out of the "
1594
+ f"{total_cpu_memory / GiB_bytes:.2f} GiB total CPU memory "
1595
+ "is allocated for the swap space.")
1596
+ if cpu_memory_usage > 0.7 * total_cpu_memory:
1597
+ raise ValueError("Too large swap space. " + msg)
1598
+ elif cpu_memory_usage > 0.4 * total_cpu_memory:
1599
+ logger.warning("Possibly too large swap space. %s", msg)
1600
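# --- Editorial sketch (not part of the packaged file) -----------------------
# The arithmetic behind the swap-space check in `verify_with_parallel_config`
# above: the per-GPU swap space is multiplied by the tensor-parallel size and
# compared against the host's total CPU memory. The sizes are illustrative.
GiB = 1 << 30
swap_space_bytes = 4 * GiB            # CacheConfig.swap_space = 4
tensor_parallel_size = 8
total_cpu_memory = 64 * GiB

cpu_memory_usage = swap_space_bytes * tensor_parallel_size  # 32 GiB
assert cpu_memory_usage > 0.4 * total_cpu_memory      # triggers the warning
assert not cpu_memory_usage > 0.7 * total_cpu_memory  # but not the hard error
# -----------------------------------------------------------------------------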
+
1601
+
1602
+ @config
1603
+ @dataclass
1604
+ class TokenizerPoolConfig:
1605
+ """This config is deprecated and will be removed in a future release.
1606
+
1607
+ Passing these parameters will have no effect. Please remove them from your
1608
+ configurations.
1609
+ """
1610
+
1611
+ pool_size: int = 0
1612
+ """This parameter is deprecated and will be removed in a future release.
1613
+ Passing this parameter will have no effect. Please remove it from your
1614
+ configurations."""
1615
+ pool_type: str = "ray"
1616
+ """This parameter is deprecated and will be removed in a future release.
1617
+ Passing this parameter will have no effect. Please remove it from your
1618
+ configurations."""
1619
+ extra_config: dict = field(default_factory=dict)
1620
+ """This parameter is deprecated and will be removed in a future release.
1621
+ Passing this parameter will have no effect. Please remove it from your
1622
+ configurations."""
1623
+
1624
+ def __post_init__(self) -> None:
1625
+ logger.warning_once(
1626
+ "TokenizerPoolConfig is deprecated and will be removed in a "
1627
+ "future release. Passing this parameter will have no effect. "
1628
+ "Please remove it from your configurations.")
1629
+
1630
+
1631
+ class LoadFormat(str, enum.Enum):
1632
+ AUTO = "auto"
1633
+ PT = "pt"
1634
+ SAFETENSORS = "safetensors"
1635
+ NPCACHE = "npcache"
1636
+ DUMMY = "dummy"
1637
+ TENSORIZER = "tensorizer"
1638
+ SHARDED_STATE = "sharded_state"
1639
+ GGUF = "gguf"
1640
+ BITSANDBYTES = "bitsandbytes"
1641
+ MISTRAL = "mistral"
1642
+ RUNAI_STREAMER = "runai_streamer"
1643
+ RUNAI_STREAMER_SHARDED = "runai_streamer_sharded"
1644
+ FASTSAFETENSORS = "fastsafetensors"
1645
+
1646
+
1647
+ @config
1648
+ @dataclass
1649
+ class LoadConfig:
1650
+ """Configuration for loading the model weights."""
1651
+
1652
+ load_format: Union[str, LoadFormat,
1653
+ "BaseModelLoader"] = LoadFormat.AUTO.value
1654
+ """The format of the model weights to load:\n
1655
+ - "auto" will try to load the weights in the safetensors format and fall
1656
+ back to the pytorch bin format if safetensors format is not available.\n
1657
+ - "pt" will load the weights in the pytorch bin format.\n
1658
+ - "safetensors" will load the weights in the safetensors format.\n
1659
+ - "npcache" will load the weights in pytorch format and store a numpy cache
1660
+ to speed up the loading.\n
1661
+ - "dummy" will initialize the weights with random values, which is mainly
1662
+ for profiling.\n
1663
+ - "tensorizer" will use CoreWeave's tensorizer library for fast weight
1664
+ loading. See the Tensorize vLLM Model script in the Examples section for
1665
+ more information.\n
1666
+ - "runai_streamer" will load the Safetensors weights using Run:ai Model
1667
+ Streamer.\n
1668
+ - "bitsandbytes" will load the weights using bitsandbytes quantization.\n
1669
+ - "sharded_state" will load weights from pre-sharded checkpoint files,
1670
+ supporting efficient loading of tensor-parallel models.\n
1671
+ - "gguf" will load weights from GGUF format files (details specified in
1672
+ https://github.com/ggml-org/ggml/blob/master/docs/gguf.md).\n
1673
+ - "mistral" will load weights from consolidated safetensors files used by
1674
+ Mistral models."""
1675
+ download_dir: Optional[str] = None
1676
+ """Directory to download and load the weights, default to the default
1677
+ cache directory of Hugging Face."""
1678
+ model_loader_extra_config: Union[dict, TensorizerConfig] = field(
1679
+ default_factory=dict)
1680
+ """Extra config for model loader. This will be passed to the model loader
1681
+ corresponding to the chosen load_format."""
1682
+ ignore_patterns: Optional[Union[list[str], str]] = None
1683
+ """The list of patterns to ignore when loading the model. Default to
1684
+ "original/**/*" to avoid repeated loading of llama's checkpoints."""
1685
+ use_tqdm_on_load: bool = True
1686
+ """Whether to enable tqdm for showing progress bar when loading model
1687
+ weights."""
1688
+ pt_load_map_location: Union[str, dict[str, str]] = "cpu"
1689
+ """
1690
+ pt_load_map_location: the map location for loading a pytorch checkpoint, to
1691
+ support loading checkpoints that can only be loaded on certain devices like
1692
+ "cuda"; this is equivalent to {"": "cuda"}. Another supported format is a
1693
+ mapping between devices, e.g. from GPU 1 to GPU 0:
1694
+ {"cuda:1": "cuda:0"}. Note that when passed from the command line, the
1695
+ strings in the dictionary need to be double quoted for json parsing. For
1696
+ more details, see the original doc for `map_location` in https://pytorch.org/docs/stable/generated/torch.load.html
1697
+ """
1698
+
1699
+ def compute_hash(self) -> str:
1700
+ """
1701
+ WARNING: Whenever a new field is added to this config,
1702
+ ensure that it is included in the factors list if
1703
+ it affects the computation graph.
1704
+
1705
+ Provide a hash that uniquely identifies all the configs
1706
+ that affect the structure of the computation
1707
+ graph from input ids/embeddings to the final hidden states,
1708
+ excluding anything before input ids/embeddings and after
1709
+ the final hidden states.
1710
+ """
1711
+ # no factors to consider.
1712
+ # this config will not affect the computation graph.
1713
+ factors: list[Any] = []
1714
+ hash_str = hashlib.md5(str(factors).encode(),
1715
+ usedforsecurity=False).hexdigest()
1716
+ return hash_str
1717
+
1718
+ def __post_init__(self):
1719
+ if isinstance(self.load_format, str):
1720
+ load_format = self.load_format.lower()
1721
+ self.load_format = LoadFormat(load_format)
1722
+
1723
+ if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
1724
+ logger.info(
1725
+ "Ignoring the following patterns when downloading weights: %s",
1726
+ self.ignore_patterns)
1727
+ else:
1728
+ self.ignore_patterns = ["original/**/*"]
1729
+
1730
+
1731
+ DistributedExecutorBackend = Literal["ray", "mp", "uni", "external_launcher"]
1732
+
1733
+
1734
+ @config
1735
+ @dataclass
1736
+ class ParallelConfig:
1737
+ """Configuration for the distributed execution."""
1738
+
1739
+ pipeline_parallel_size: int = 1
1740
+ """Number of pipeline parallel groups."""
1741
+ tensor_parallel_size: int = 1
1742
+ """Number of tensor parallel groups."""
1743
+ data_parallel_size: int = 1
1744
+ """Number of data parallel groups. MoE layers will be sharded according to
1745
+ the product of the tensor parallel size and data parallel size."""
1746
+ data_parallel_size_local: int = 1
1747
+ """Number of local data parallel groups."""
1748
+ data_parallel_rank: int = 0
1749
+ """Rank of the data parallel group."""
1750
+ data_parallel_rank_local: Optional[int] = None
1751
+ """Local rank of the data parallel group,
1752
+ set only in SPMD mode."""
1753
+ data_parallel_master_ip: str = "127.0.0.1"
1754
+ """IP of the data parallel master."""
1755
+ data_parallel_rpc_port: int = 29550
1756
+ """Port for data parallel messaging."""
1757
+ data_parallel_master_port: int = 29500
1758
+ """Port of the data parallel master."""
1759
+ data_parallel_backend: str = "mp"
1760
+ """Backend to use for data parallel, either "mp" or "ray"."""
1761
+ enable_expert_parallel: bool = False
1762
+ """Use expert parallelism instead of tensor parallelism for MoE layers."""
1763
+ max_parallel_loading_workers: Optional[int] = None
1764
+ """Maximum number of parallel loading workers when loading model
1765
+ sequentially in multiple batches. To avoid RAM OOM when using tensor
1766
+ parallel and large models."""
1767
+
1768
+ disable_custom_all_reduce: bool = False
1769
+ """Disable the custom all-reduce kernel and fall back to NCCL."""
1770
+
1771
+ tokenizer_pool_config: Optional[TokenizerPoolConfig] = None
1772
+ """This parameter is deprecated and will be removed in a future release.
1773
+ Please remove it from your configs."""
1774
+
1775
+ ray_workers_use_nsight: bool = False
1776
+ """Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler."""
1777
+
1778
+ placement_group: Optional["PlacementGroup"] = None
1779
+ """ray distributed model workers placement group."""
1780
+
1781
+ distributed_executor_backend: Optional[Union[DistributedExecutorBackend,
1782
+ type["ExecutorBase"]]] = None
1783
+ """Backend to use for distributed model
1784
+ workers, either "ray" or "mp" (multiprocessing). If the product
1785
+ of pipeline_parallel_size and tensor_parallel_size is less than
1786
+ or equal to the number of GPUs available, "mp" will be used to
1787
+ keep processing on a single host. Otherwise, this will default
1788
+ to "ray" if Ray is installed and fail otherwise. Note that tpu
1789
+ and hpu only support Ray for distributed inference."""
1790
+
1791
+ worker_cls: str = "auto"
1792
+ """The full name of the worker class to use. If "auto", the worker class
1793
+ will be determined based on the platform."""
1794
+ sd_worker_cls: str = "auto"
1795
+ """The full name of the worker class to use for speculative decofing.
1796
+ If "auto", the worker class will be determined based on the platform."""
1797
+ worker_extension_cls: str = ""
1798
+ """The full name of the worker extension class to use. The worker extension
1799
+ class is dynamically inherited by the worker class. This is used to inject
1800
+ new attributes and methods to the worker class for use in collective_rpc
1801
+ calls."""
1802
+
1803
+ world_size: int = field(init=False)
1804
+ """world_size is TPxPP, it affects the number of workers we create."""
1805
+
1806
+ rank: int = 0
1807
+ """Global rank in distributed setup."""
1808
+
1809
+ enable_multimodal_encoder_data_parallel: bool = False
1810
+ """ Use data parallelism instead of tensor parallelism for vision encoder.
1811
+ Only support LLama4 for now"""
1812
+
1813
+ @property
1814
+ def world_size_across_dp(self) -> int:
1815
+ """world_size_across_dp is TPxPPxDP, it is the size of the world
1816
+ including data parallelism."""
1817
+ return self.world_size * self.data_parallel_size
1818
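A small arithmetic sketch of the sizing described above; the parallel sizes are made up for illustration:

    tensor_parallel_size = 2
    pipeline_parallel_size = 2
    data_parallel_size = 4

    # world_size is TP x PP: the number of workers per model replica.
    world_size = tensor_parallel_size * pipeline_parallel_size        # 4
    # world_size_across_dp adds the DP dimension: workers across the deployment.
    world_size_across_dp = world_size * data_parallel_size            # 16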
+
1819
+ def get_next_dp_init_port(self) -> int:
1820
+ """
1821
+ We might need to initialize process groups related to data
1822
+ parallelism in multiple processes,
1823
+ e.g. both in the worker and in the engine, which
1824
+ can live in different processes. To avoid port conflicts, we
1825
+ increment the port number each time we need to initialize a
1826
+ new process group related to data parallelism.
1827
+ """
1828
+ answer = self.data_parallel_master_port
1829
+ self.data_parallel_master_port += 1
1830
+ return answer
1831
+
1832
+ def stateless_init_dp_group(self) -> "ProcessGroup":
1833
+ from vllm.distributed.utils import (
1834
+ stateless_init_torch_distributed_process_group)
1835
+
1836
+ # use gloo since the engine process might not have cuda device
1837
+ dp_group = stateless_init_torch_distributed_process_group(
1838
+ self.data_parallel_master_ip,
1839
+ self.get_next_dp_init_port(),
1840
+ self.data_parallel_rank,
1841
+ self.data_parallel_size,
1842
+ backend="gloo")
1843
+
1844
+ return dp_group
1845
+
1846
+ @staticmethod
1847
+ def has_unfinished_dp(dp_group: "ProcessGroup",
1848
+ has_unfinished: bool) -> bool:
1849
+ tensor = torch.tensor([has_unfinished],
1850
+ dtype=torch.int32,
1851
+ device="cpu")
1852
+ # dp rank 0: has_unfinished_seqs=True
1853
+ # dp rank 1: has_unfinished_seqs=False
1854
+ # aggregated: has_unfinished_seqs=True
1855
+ # so this is an OR operation, i.e. MAX in integers
1856
+ torch.distributed.all_reduce(tensor, op=ReduceOp.MAX, group=dp_group)
1857
+ aggregated_has_unfinished = bool(tensor.item())
1858
+ return aggregated_has_unfinished
1859
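A toy sketch (plain Python, no process group) of the OR-as-MAX aggregation used in `has_unfinished_dp`:

    # One boolean flag per data parallel rank.
    has_unfinished_per_rank = [True, False, False]

    # all_reduce with ReduceOp.MAX over {0, 1} behaves like a logical OR.
    aggregated = bool(max(int(flag) for flag in has_unfinished_per_rank))
    assert aggregated is True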
+
1860
+ def compute_hash(self):
1861
+ """
1862
+ Provide a hash that uniquely identifies all the configs
1863
+ that affect the structure of the computation
1864
+ graph from input ids/embeddings to the final hidden states,
1865
+ excluding anything before input ids/embeddings and after
1866
+ the final hidden states.
1867
+ """
1868
+ factors: list[Any] = []
1869
+ factors.append(self.pipeline_parallel_size)
1870
+ factors.append(self.tensor_parallel_size)
1871
+ factors.append(self.enable_expert_parallel)
1872
+ factors.append(self.data_parallel_size)
1873
+ factors.append(envs.VLLM_ALL2ALL_BACKEND)
1874
+ return hashlib.sha256(str(factors).encode()).hexdigest()
1875
+
1876
+ def __post_init__(self) -> None:
1877
+ self.world_size = self.pipeline_parallel_size * \
1878
+ self.tensor_parallel_size
1879
+
1880
+ if self.data_parallel_size_local > self.data_parallel_size:
1881
+ raise ValueError(
1882
+ f"data_parallel_size_local ({self.data_parallel_size_local}) "
1883
+ f"must be <= data_parallel_size ({self.data_parallel_size})")
1884
+
1885
+ if self.data_parallel_size > 1 or self.data_parallel_size_local == 0:
1886
+ # Data parallel was specified in the engine args.
1887
+ self.data_parallel_master_port = get_open_port()
1888
+ else:
1889
+ # Otherwise fall back to env vars (e.g. for offline SPMD case).
1890
+ self.data_parallel_size = envs.VLLM_DP_SIZE
1891
+ self.data_parallel_rank = envs.VLLM_DP_RANK
1892
+ self.data_parallel_rank_local = envs.VLLM_DP_RANK_LOCAL
1893
+ self.data_parallel_master_ip = envs.VLLM_DP_MASTER_IP
1894
+ self.data_parallel_master_port = envs.VLLM_DP_MASTER_PORT
1895
+
1896
+ if self.distributed_executor_backend == "external_launcher":
1897
+ import os
1898
+ os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
1899
+ logger.info("Disabling V1 multiprocessing for external launcher.")
1900
+
1901
+ ray_only_devices: list[str] = []
1902
+ from vllm.platforms import current_platform
1903
+ if (current_platform.device_type in ray_only_devices
1904
+ and self.world_size > 1):
1905
+ if self.distributed_executor_backend is None:
1906
+ self.distributed_executor_backend = "ray"
1907
+ if self.distributed_executor_backend != "ray":
1908
+ raise ValueError(
1909
+ f"{current_platform.device_type.upper()} backend only "
1910
+ "supports Ray for distributed inference.")
1911
+
1912
+ if self.distributed_executor_backend is None and self.world_size > 1:
1913
+ # We use multiprocessing by default if world_size fits on the
1914
+ # current node and we aren't in a ray placement group.
1915
+
1916
+ from vllm.executor import ray_utils
1917
+ backend: DistributedExecutorBackend = "mp"
1918
+ ray_found = ray_utils.ray_is_available()
1919
+ if current_platform.is_neuron():
1920
+ # neuron uses single process to control multiple devices
1921
+ backend = "uni"
1922
+ elif current_platform.is_tpu() and envs.VLLM_XLA_USE_SPMD:
1923
+ backend = "uni"
1924
+ elif (current_platform.is_cuda()
1925
+ and cuda_device_count_stateless() < self.world_size):
1926
+ if not ray_found:
1927
+ raise ValueError("Unable to load Ray which is "
1928
+ "required for multi-node inference, "
1929
+ "please install Ray with `pip install "
1930
+ "ray`.") from ray_utils.ray_import_err
1931
+ backend = "ray"
1932
+ elif self.data_parallel_backend == "ray":
1933
+ logger.info("Using ray distributed inference because "
1934
+ "data_parallel_backend is ray")
1935
+ backend = "ray"
1936
+ elif ray_found:
1937
+ if self.placement_group:
1938
+ backend = "ray"
1939
+ else:
1940
+ from ray import is_initialized as ray_is_initialized
1941
+ if ray_is_initialized():
1942
+ from ray.util import get_current_placement_group
1943
+ if get_current_placement_group():
1944
+ backend = "ray"
1945
+ self.distributed_executor_backend = backend
1946
+ logger.info("Defaulting to use %s for distributed inference",
1947
+ backend)
1948
+
1949
+ if self.distributed_executor_backend is None and self.world_size == 1:
1950
+ self.distributed_executor_backend = "uni"
1951
+
1952
+ self._verify_args()
1953
+
1954
+ @property
1955
+ def use_ray(self) -> bool:
1956
+ return self.distributed_executor_backend == "ray" or (
1957
+ isinstance(self.distributed_executor_backend, type)
1958
+ and self.distributed_executor_backend.uses_ray)
1959
+
1960
+ def _verify_args(self) -> None:
1961
+ # Lazy import to avoid circular import
1962
+ from vllm.executor.executor_base import ExecutorBase
1963
+ from vllm.platforms import current_platform
1964
+ if self.distributed_executor_backend not in (
1965
+ "ray", "mp", "uni",
1966
+ "external_launcher", None) and not (isinstance(
1967
+ self.distributed_executor_backend, type) and issubclass(
1968
+ self.distributed_executor_backend, ExecutorBase)):
1969
+ raise ValueError(
1970
+ "Unrecognized distributed executor backend "
1971
+ f"{self.distributed_executor_backend}. Supported "
1972
+ "values are 'ray', 'mp' 'uni', 'external_launcher' or"
1973
+ " custom ExecutorBase subclass.")
1974
+ if self.use_ray:
1975
+ from vllm.executor import ray_utils
1976
+ ray_utils.assert_ray_available()
1977
+
1978
+ if not current_platform.use_custom_allreduce():
1979
+ self.disable_custom_all_reduce = True
1980
+ logger.info(
1981
+ "Disabled the custom all-reduce kernel because it is not "
1982
+ "supported on current platform.")
1983
+ if self.ray_workers_use_nsight and not self.use_ray:
1984
+ raise ValueError("Unable to use nsight profiling unless workers "
1985
+ "run with Ray.")
1986
+
1987
+ assert isinstance(self.worker_extension_cls, str), (
1988
+ "worker_extension_cls must be a string (qualified class name).")
1989
+
1990
+
1991
+ PreemptionMode = Literal["swap", "recompute"]
1992
+ SchedulerPolicy = Literal["fcfs", "priority"]
1993
+
1994
+
1995
+ @config
1996
+ @dataclass
1997
+ class SchedulerConfig:
1998
+ """Scheduler configuration."""
1999
+
2000
+ runner_type: RunnerType = "generate"
2001
+ """The runner type to launch for the model."""
2002
+
2003
+ max_num_batched_tokens: SkipValidation[int] = None # type: ignore
2004
+ """Maximum number of tokens to be processed in a single iteration.
2005
+
2006
+ This config has no static default. If left unspecified by the user, it will
2007
+ be set in `EngineArgs.create_engine_config` based on the usage context."""
2008
+
2009
+ max_num_seqs: SkipValidation[int] = None # type: ignore
2010
+ """Maximum number of sequences to be processed in a single iteration.
2011
+
2012
+ This config has no static default. If left unspecified by the user, it will
2013
+ be set in `EngineArgs.create_engine_config` based on the usage context."""
2014
+
2015
+ max_model_len: SkipValidation[int] = None # type: ignore
2016
+ """Maximum length of a sequence (including prompt and generated text). This
2017
+ is primarily set in `ModelConfig` and that value should be manually
2018
+ duplicated here."""
2019
+
2020
+ max_num_partial_prefills: int = 1
2021
+ """For chunked prefill, the maximum number of sequences that can be
2022
+ partially prefilled concurrently."""
2023
+
2024
+ max_long_partial_prefills: int = 1
2025
+ """For chunked prefill, the maximum number of prompts longer than
2026
+ long_prefill_token_threshold that will be prefilled concurrently. Setting
2027
+ this to less than max_num_partial_prefills will allow shorter prompts to jump
2028
+ the queue in front of longer prompts in some cases, improving latency."""
2029
+
2030
+ long_prefill_token_threshold: int = 0
2031
+ """For chunked prefill, a request is considered long if the prompt is
2032
+ longer than this number of tokens."""
2033
+
2034
+ num_lookahead_slots: int = 0
2035
+ """The number of slots to allocate per sequence per
2036
+ step, beyond the known token ids. This is used in speculative
2037
+ decoding to store KV activations of tokens which may or may not be
2038
+ accepted.
2039
+
2040
+ NOTE: This will be replaced by speculative config in the future; it is
2041
+ present to enable correctness tests until then."""
2042
+
2043
+ cuda_graph_sizes: list[int] = field(default_factory=lambda: [512])
2044
+ """Cuda graph capture sizes, default is 512.
2045
+ 1. if one value is provided, then the capture list would follow the
2046
+ pattern: [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)]
2047
+ 2. if more than one value (e.g. 1 2 128) is provided, then the capture list
2048
+ will follow the provided list."""
2049
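A short sketch of the single-value expansion described in the docstring above; the size 32 is an arbitrary example:

    cuda_graph_sizes = [32]
    capture_sizes = [1, 2, 4] + list(range(8, cuda_graph_sizes[0] + 1, 8))
    # capture_sizes == [1, 2, 4, 8, 16, 24, 32]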
+
2050
+ delay_factor: float = 0.0
2051
+ """Apply a delay (of delay factor multiplied by previous
2052
+ prompt latency) before scheduling next prompt."""
2053
+
2054
+ enable_chunked_prefill: SkipValidation[bool] = None # type: ignore
2055
+ """If True, prefill requests can be chunked based
2056
+ on the remaining max_num_batched_tokens."""
2057
+
2058
+ is_multimodal_model: bool = False
2059
+ """True if the model is multimodal."""
2060
+
2061
+ # TODO (ywang96): Make this configurable.
2062
+ max_num_encoder_input_tokens: int = field(init=False)
2063
+ """Multimodal encoder compute budget, only used in V1.
2064
+
2065
+ NOTE: This is not currently configurable. It will be overridden by
2066
+ max_num_batched_tokens in case max multimodal embedding size is larger."""
2067
+
2068
+ # TODO (ywang96): Make this configurable.
2069
+ encoder_cache_size: int = field(init=False)
2070
+ """Multimodal encoder cache size, only used in V1.
2071
+
2072
+ NOTE: This is not currently configurable. It will be overridden by
2073
+ max_num_batched_tokens in case max multimodal embedding size is larger."""
2074
+
2075
+ preemption_mode: Optional[PreemptionMode] = None
2076
+ """Whether to perform preemption by swapping or
2077
+ recomputation. If not specified, we determine the mode as follows:
2078
+ We use recomputation by default since it incurs lower overhead than
2079
+ swapping. However, when the sequence group has multiple sequences
2080
+ (e.g., beam search), recomputation is not currently supported. In
2081
+ such a case, we use swapping instead."""
2082
+
2083
+ num_scheduler_steps: int = 1
2084
+ """Maximum number of forward steps per scheduler call."""
2085
+
2086
+ multi_step_stream_outputs: bool = True
2087
+ """If False, then multi-step will stream outputs at the end of all steps"""
2088
+
2089
+ send_delta_data: bool = False
2090
+ """Private API. If used, scheduler sends delta data to
2091
+ workers instead of an entire data. It should be enabled only
2092
+ when SPMD worker architecture is enabled. I.e.,
2093
+ VLLM_USE_RAY_SPMD_WORKER=1"""
2094
+
2095
+ policy: SchedulerPolicy = "fcfs"
2096
+ """The scheduling policy to use:\n
2097
+ - "fcfs" means first come first served, i.e. requests are handled in order
2098
+ of arrival.\n
2099
+ - "priority" means requests are handled based on given priority (lower
2100
+ value means earlier handling), with time of arrival deciding any ties."""
2101
+
2102
+ chunked_prefill_enabled: bool = field(init=False)
2103
+ """True if chunked prefill is enabled."""
2104
+
2105
+ disable_chunked_mm_input: bool = False
2106
+ """If set to true and chunked prefill is enabled, we do not want to
2107
+ partially schedule a multimodal item. Only used in V1.
2108
+ This ensures that if a request has a mixed prompt
2109
+ (like text tokens TTTT followed by image tokens IIIIIIIIII) where only
2110
+ some image tokens can be scheduled (like TTTTIIIII, leaving IIIII),
2111
+ it will be scheduled as TTTT in one step and IIIIIIIIII in the next."""
2112
+
2113
+ # scheduler class or path. "vllm.core.scheduler.Scheduler" (default)
2114
+ # or "mod.custom_class".
2115
+ scheduler_cls: Union[str, type[object]] = "vllm.core.scheduler.Scheduler"
2116
+ """The scheduler class to use. "vllm.core.scheduler.Scheduler" is the
2117
+ default scheduler. Can be a class directly or the path to a class of form
2118
+ "mod.custom_class"."""
2119
+
2120
+ disable_hybrid_kv_cache_manager: bool = False
2121
+ """If set to True, KV cache manager will allocate the same size of KV cache
2122
+ for all attention layers even if there are multiple types of attention layers
2123
+ like full attention and sliding window attention.
2124
+ """
2125
+
2126
+ def compute_hash(self) -> str:
2127
+ """
2128
+ WARNING: Whenever a new field is added to this config,
2129
+ ensure that it is included in the factors list if
2130
+ it affects the computation graph.
2131
+
2132
+ Provide a hash that uniquely identifies all the configs
2133
+ that affect the structure of the computation
2134
+ graph from input ids/embeddings to the final hidden states,
2135
+ excluding anything before input ids/embeddings and after
2136
+ the final hidden states.
2137
+ """
2138
+ # no factors to consider.
2139
+ # this config will not affect the computation graph.
2140
+ factors: list[Any] = []
2141
+ hash_str = hashlib.md5(str(factors).encode(),
2142
+ usedforsecurity=False).hexdigest()
2143
+ return hash_str
2144
+
2145
+ def __post_init__(self) -> None:
2146
+ if self.max_model_len is None:
2147
+ self.max_model_len = 8192
2148
+
2149
+ if self.max_num_seqs is None:
2150
+ self.max_num_seqs = 128
2151
+
2152
+ if self.max_num_batched_tokens is None:
2153
+ if self.enable_chunked_prefill:
2154
+ if self.num_scheduler_steps > 1:
2155
+ # Multi-step Chunked-Prefill doesn't allow prompt-chunking
2156
+ # for now. Have max_num_batched_tokens set to max_model_len
2157
+ # so we don't reject sequences on account of a short
2158
+ # max_num_batched_tokens.
2159
+ self.max_num_batched_tokens = max(
2160
+ self.max_model_len, DEFAULT_MAX_NUM_BATCHED_TOKENS)
2161
+ else:
2162
+ self.max_num_batched_tokens = (
2163
+ DEFAULT_MAX_NUM_BATCHED_TOKENS)
2164
+ else:
2165
+ # If max_model_len is too short, use
2166
+ # DEFAULT_MAX_NUM_BATCHED_TOKENS as the default value
2167
+ # for higher throughput.
2168
+ self.max_num_batched_tokens = max(
2169
+ self.max_model_len, DEFAULT_MAX_NUM_BATCHED_TOKENS)
2170
+
2171
+ if self.runner_type == "pooling":
2172
+ # Choose specific value for higher throughput
2173
+ self.max_num_batched_tokens = max(
2174
+ self.max_num_batched_tokens,
2175
+ POOLING_MODEL_MAX_NUM_BATCHED_TOKENS,
2176
+ )
2177
+ if self.is_multimodal_model:
2178
+ # The value needs to be at least the number of multimodal tokens
2179
+ self.max_num_batched_tokens = max(
2180
+ self.max_num_batched_tokens,
2181
+ MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
2182
+ )
2183
+
2184
+ # When using default settings,
2185
+ # Ensure max_num_batched_tokens does not exceed model limit.
2186
+ # Some models (e.g., Whisper) have embeddings tied to max length.
2187
+ self.max_num_batched_tokens = min(
2188
+ self.max_num_seqs * self.max_model_len,
2189
+ self.max_num_batched_tokens)
2190
+
2191
+ self.max_num_encoder_input_tokens = self.max_num_batched_tokens
2192
+ self.encoder_cache_size = self.max_num_batched_tokens
2193
+
2194
+ if self.enable_chunked_prefill:
2195
+ logger.info(
2196
+ "Chunked prefill is enabled with max_num_batched_tokens=%d.",
2197
+ self.max_num_batched_tokens)
2198
+
2199
+ self.chunked_prefill_enabled = self.enable_chunked_prefill
2200
+ if self.max_num_partial_prefills > 1:
2201
+ if self.long_prefill_token_threshold == 0:
2202
+ self.long_prefill_token_threshold = int(self.max_model_len *
2203
+ 0.04)
2204
+
2205
+ logger.info(
2206
+ "Concurrent partial prefills enabled with "
2207
+ "max_num_partial_prefills=%d, max_long_partial_prefills=%d, "
2208
+ "long_prefill_token_threshold=%d",
2209
+ self.max_num_partial_prefills, self.max_long_partial_prefills,
2210
+ self.long_prefill_token_threshold)
2211
+
2212
+ self._verify_args()
2213
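To make the defaulting in `__post_init__` concrete, a small arithmetic sketch; the model length is made up and the 2048 value for `DEFAULT_MAX_NUM_BATCHED_TOKENS` is an assumption about the module-level constant defined elsewhere in this file:

    max_model_len = 32768                     # assumed
    DEFAULT_MAX_NUM_BATCHED_TOKENS = 2048     # assumed constant value

    # Without chunked prefill, the budget is never smaller than the model length.
    max_num_batched_tokens = max(max_model_len, DEFAULT_MAX_NUM_BATCHED_TOKENS)  # 32768

    # With concurrent partial prefills, "long" defaults to 4% of the model length.
    long_prefill_token_threshold = int(max_model_len * 0.04)                     # 1310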
+
2214
+ def _verify_args(self) -> None:
2215
+ if (self.max_num_batched_tokens < self.max_model_len
2216
+ and not self.chunked_prefill_enabled):
2217
+ raise ValueError(
2218
+ f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
2219
+ f"smaller than max_model_len ({self.max_model_len}). "
2220
+ "This effectively limits the maximum sequence length to "
2221
+ "max_num_batched_tokens and makes vLLM reject longer "
2222
+ "sequences. Please increase max_num_batched_tokens or "
2223
+ "decrease max_model_len.")
2224
+
2225
+ if self.max_num_batched_tokens < self.max_num_seqs:
2226
+ raise ValueError(
2227
+ f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
2228
+ "be greater than or equal to max_num_seqs "
2229
+ f"({self.max_num_seqs}).")
2230
+
2231
+ if self.max_num_batched_tokens > self.max_num_seqs * self.max_model_len:
2232
+ logger.warning(
2233
+ "max_num_batched_tokens (%d) exceeds max_num_seqs"
2234
+ "* max_model_len (%d). This may lead to unexpected behavior.",
2235
+ self.max_num_batched_tokens,
2236
+ self.max_num_seqs * self.max_model_len)
2237
+
2238
+ if self.num_lookahead_slots < 0:
2239
+ raise ValueError(
2240
+ "num_lookahead_slots "
2241
+ f"({self.num_lookahead_slots}) must be greater than or "
2242
+ "equal to 0.")
2243
+
2244
+ if self.num_scheduler_steps < 1:
2245
+ raise ValueError(
2246
+ "num_scheduler_steps "
2247
+ f"({self.num_scheduler_steps}) must be greater than or "
2248
+ "equal to 1.")
2249
+
2250
+ if self.max_num_partial_prefills < 1:
2251
+ raise ValueError(
2252
+ f"max_num_partial_prefills ({self.max_num_partial_prefills}) "
2253
+ "must be greater than or equal to 1.")
2254
+ elif self.max_num_partial_prefills > 1:
2255
+ if not self.chunked_prefill_enabled:
2256
+ raise ValueError("Chunked prefill must be enabled to set "
2257
+ "max_num_partial_prefills > 1.")
2258
+
2259
+ if self.long_prefill_token_threshold > self.max_model_len:
2260
+ raise ValueError(
2261
+ "long_prefill_token_threshold "
2262
+ f"({self.long_prefill_token_threshold}) cannot be greater "
2263
+ f"than the max_model_len ({self.max_model_len}).")
2264
+
2265
+ if (self.max_long_partial_prefills
2266
+ < 1) or (self.max_long_partial_prefills
2267
+ > self.max_num_partial_prefills):
2268
+ raise ValueError(
2269
+ f"max_long_partial_prefills ({self.max_long_partial_prefills}) "
2270
+ "must be greater than or equal to 1 and less than or equal to "
2271
+ f"max_num_partial_prefills ({self.max_num_partial_prefills}).")
2272
+
2273
+ @property
2274
+ def is_multi_step(self) -> bool:
2275
+ return self.num_scheduler_steps > 1
2276
+
2277
+
2278
+ Device = Literal["auto", "cuda", "neuron", "cpu", "tpu", "xpu", "hpu"]
2279
+
2280
+
2281
+ @config
2282
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
2283
+ class DeviceConfig:
2284
+ """Configuration for the device to use for vLLM execution."""
2285
+
2286
+ device: SkipValidation[Union[Device, torch.device]] = "auto"
2287
+ """Device type for vLLM execution.
2288
+ This parameter is deprecated and will be
2289
+ removed in a future release.
2290
+ It will now be set automatically based
2291
+ on the current platform."""
2292
+ device_type: str = field(init=False)
2293
+ """Device type from the current platform. This is set in
2294
+ `__post_init__`."""
2295
+
2296
+ def compute_hash(self) -> str:
2297
+ """
2298
+ WARNING: Whenever a new field is added to this config,
2299
+ ensure that it is included in the factors list if
2300
+ it affects the computation graph.
2301
+
2302
+ Provide a hash that uniquely identifies all the configs
2303
+ that affect the structure of the computation
2304
+ graph from input ids/embeddings to the final hidden states,
2305
+ excluding anything before input ids/embeddings and after
2306
+ the final hidden states.
2307
+ """
2308
+ # no factors to consider.
2309
+ # the device/platform information will be summarized
2310
+ # by torch/vllm automatically.
2311
+ factors: list[Any] = []
2312
+ hash_str = hashlib.md5(str(factors).encode(),
2313
+ usedforsecurity=False).hexdigest()
2314
+ return hash_str
2315
+
2316
+ def __post_init__(self):
2317
+ if self.device == "auto":
2318
+ # Automated device type detection
2319
+ from vllm.platforms import current_platform
2320
+ self.device_type = current_platform.device_type
2321
+ if not self.device_type:
2322
+ raise RuntimeError(
2323
+ "Failed to infer device type, please set "
2324
+ "the environment variable `VLLM_LOGGING_LEVEL=DEBUG` "
2325
+ "to turn on verbose logging to help debug the issue.")
2326
+ else:
2327
+ # Device type is assigned explicitly
2328
+ self.device_type = self.device
2329
+
2330
+ # Some device types require processing inputs on CPU
2331
+ if self.device_type in ["neuron"]:
2332
+ self.device = torch.device("cpu")
2333
+ elif self.device_type in ["tpu"]:
2334
+ self.device = None
2335
+ else:
2336
+ # Set device with device type
2337
+ self.device = torch.device(self.device_type)
2338
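A minimal sketch of the explicit-device path above (as opposed to "auto" detection); assumes the module's imports are available:

    dev_cfg = DeviceConfig(device="cpu")
    assert dev_cfg.device_type == "cpu"
    assert dev_cfg.device == torch.device("cpu")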
+
2339
+
2340
+ SpeculativeMethod = Literal["ngram", "eagle", "eagle3", "medusa",
2341
+ "mlp_speculator", "draft_model", "deepseek_mtp"]
2342
+ SpeculativeAcceptanceMethod = Literal["rejection_sampler",
2343
+ "typical_acceptance_sampler"]
2344
+
2345
+
2346
+ @config
2347
+ @dataclass
2348
+ class SpeculativeConfig:
2349
+ """Configuration for speculative decoding."""
2350
+
2351
+ # General speculative decoding control
2352
+ num_speculative_tokens: SkipValidation[int] = None # type: ignore
2353
+ """The number of speculative tokens, if provided. It will default to the
2354
+ number in the draft model config if present, otherwise, it is required."""
2355
+ model: Optional[str] = None
2356
+ """The name of the draft model, eagle head, or additional weights, if
2357
+ provided."""
2358
+ method: Optional[SpeculativeMethod] = None
2359
+ """The name of the speculative method to use. If users provide and set the
2360
+ `model` param, the speculative method type will be detected automatically
2361
+ if possible; if the `model` param is not provided, the method name must be
2362
+ provided.
2363
+
2364
+ If using `ngram` method, the related configuration `prompt_lookup_max` and
2365
+ `prompt_lookup_min` should be considered."""
2366
+ acceptance_method: SpeculativeAcceptanceMethod = "rejection_sampler"
2367
+ """The method to use for accepting draft tokens:\n
2368
+ - "rejection_sampler" maps to `RejectionSampler`.\n
2369
+ - "typical_acceptance_sampler" maps to `TypicalAcceptanceSampler`.
2370
+
2371
+ If using `typical_acceptance_sampler`, the related configuration
2372
+ `posterior_threshold` and `posterior_alpha` should be considered."""
2373
+ draft_tensor_parallel_size: Optional[int] = None
2374
+ """The degree of the tensor parallelism for the draft model. Can only be 1
2375
+ or the same as the target model's tensor parallel size."""
2376
+ disable_logprobs: bool = True
2377
+ """If set to True, token log probabilities are not returned during
2378
+ speculative decoding. If set to False, token log probabilities are returned
2379
+ according to the log probability settings in SamplingParams."""
2380
+
2381
+ # Draft model configuration
2382
+ quantization: Optional[QuantizationMethods] = None
2383
+ """Quantization method that was used to quantize the draft model weights.
2384
+ If `None`, we assume the model weights are not quantized. Note that it only
2385
+ takes effect when using the draft model-based speculative method."""
2386
+ max_model_len: Optional[int] = None
2387
+ """The maximum model length of the draft model. Used when testing the
2388
+ ability to skip speculation for some sequences."""
2389
+ revision: Optional[str] = None
2390
+ """The specific model version to use for the draft model. It can be a
2391
+ branch name, a tag name, or a commit id. If unspecified, will use the
2392
+ default version."""
2393
+ code_revision: Optional[str] = None
2394
+ """The specific revision to use for the draft model code on Hugging Face
2395
+ Hub. It can be a branch name, a tag name, or a commit id. If unspecified,
2396
+ will use the default version."""
2397
+
2398
+ # Advanced control
2399
+ disable_mqa_scorer: bool = False
2400
+ """Disable the MQA scorer and fall back to batch expansion for scoring
2401
+ proposals."""
2402
+ disable_by_batch_size: Optional[int] = None
2403
+ """Disable speculative decoding for new incoming requests when the number
2404
+ of enqueued requests is larger than this value, if provided."""
2405
+
2406
+ # Ngram proposer configuration
2407
+ prompt_lookup_max: Optional[int] = None
2408
+ """Maximum size of ngram token window when using Ngram proposer, required
2409
+ when method is set to ngram."""
2410
+ prompt_lookup_min: Optional[int] = None
2411
+ """Minimum size of ngram token window when using Ngram proposer, if
2412
+ provided. Defaults to 1."""
2413
+
2414
+ # Typical acceptance sampler configuration
2415
+ posterior_threshold: Optional[float] = None
2416
+ """A threshold value that sets a lower bound on the posterior probability
2417
+ of a token in the target model for it to be accepted. This threshold is
2418
+ used only when we use the `TypicalAcceptanceSampler` for token acceptance.
2419
+ """
2420
+ posterior_alpha: Optional[float] = None
2421
+ """Scaling factor for entropy-based threshold, applied when using
2422
+ `TypicalAcceptanceSampler`."""
2423
+
2424
+ speculative_token_tree: Optional[str] = None
2425
+ """Specifies the tree structure for speculative token generation.
2426
+ """
2427
+ # required configuration params passed from engine
2428
+ target_model_config: SkipValidation[ModelConfig] = None # type: ignore
2429
+ """The configuration of the target model."""
2430
+ target_parallel_config: SkipValidation[
2431
+ ParallelConfig] = None # type: ignore
2432
+ """The parallel configuration for the target model."""
2433
+ enable_chunked_prefill: SkipValidation[bool] = None # type: ignore
2434
+ """Whether vLLM is configured to use chunked prefill or not. Used for
2435
+ raising an error since it's not yet compatible with speculative decode."""
2436
+ disable_log_stats: SkipValidation[bool] = None # type: ignore
2437
+ """Whether to disable the periodic printing of stage times in speculative
2438
+ decoding."""
2439
+
2440
+ # params generated in the post-init stage
2441
+ draft_model_config: SkipValidation[ModelConfig] = None # type: ignore
2442
+ """The configuration of the draft model initialized internal."""
2443
+ draft_parallel_config: SkipValidation[
2444
+ ParallelConfig] = None # type: ignore
2445
+ """The parallel configuration for the draft model initialized internal."""
2446
+
2447
+ def compute_hash(self) -> str:
2448
+ """
2449
+ WARNING: Whenever a new field is added to this config,
2450
+ ensure that it is included in the factors list if
2451
+ it affects the computation graph.
2452
+
2453
+ Provide a hash that uniquely identifies all the configs
2454
+ that affect the structure of the computation
2455
+ graph from input ids/embeddings to the final hidden states,
2456
+ excluding anything before input ids/embeddings and after
2457
+ the final hidden states.
2458
+ """
2459
+ factors: list[Any] = []
2460
+ # Eagle3 affects the computation graph because it returns intermediate
2461
+ # hidden states in addition to the final hidden state.
2462
+ factors.append(self.method == "eagle3")
2463
+ hash_str = hashlib.md5(str(factors).encode(),
2464
+ usedforsecurity=False).hexdigest()
2465
+ return hash_str
2466
+
2467
+ @classmethod
2468
+ def from_dict(cls, dict_value: dict) -> "SpeculativeConfig":
2469
+ """Parse the CLI value for the speculative config."""
2470
+ return cls(**dict_value)
2471
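A hedged sketch of building the config from a CLI-style dict via `from_dict`; the ngram settings are illustrative, and this assumes the module's dependencies are importable:

    spec_cfg = SpeculativeConfig.from_dict({
        "method": "ngram",
        "num_speculative_tokens": 3,
        "prompt_lookup_max": 4,
    })
    # __post_init__ fills prompt_lookup_min from prompt_lookup_max when unset.
    assert spec_cfg.prompt_lookup_min == 4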
+
2472
+ @staticmethod
2473
+ def hf_config_override(hf_config: PretrainedConfig) -> PretrainedConfig:
2474
+ if hf_config.model_type == "deepseek_v3":
2475
+ hf_config.model_type = "deepseek_mtp"
2476
+ if hf_config.model_type == "deepseek_mtp":
2477
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2478
+ hf_config.update({
2479
+ "n_predict": n_predict,
2480
+ "architectures": ["DeepSeekMTPModel"]
2481
+ })
2482
+
2483
+ if hf_config.architectures[0] == "MiMoForCausalLM":
2484
+ hf_config.model_type = "mimo_mtp"
2485
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2486
+ hf_config.update({
2487
+ "num_hidden_layers": 0,
2488
+ "n_predict": n_predict,
2489
+ "architectures": ["MiMoMTPModel"]
2490
+ })
2491
+ return hf_config
2492
+
2493
+ return hf_config
2494
+
2495
+ def __post_init__(self):
2496
+
2497
+ # Note: "method" is a new parameter that helps to extend the
2498
+ # configuration of non-model-based proposers, and the "model" parameter
2499
+ # will be used to set the draft model, eagle head, or additional weight
2500
+ # when needed. If users do not specify "method", the speculative method
2501
+ # will be detected automatically if possible. If the speculative method
2502
+ # can not be detected, it will be considered as the "draft_model" by
2503
+ # default.
2504
+
2505
+ if self.model is None and self.num_speculative_tokens is not None:
2506
+ # TODO(Shangming): Refactor mtp configuration logic when supporting
2507
+ # mtp acceleration for more models besides deepseek_v3
2508
+ if self.target_model_config and \
2509
+ (self.target_model_config.hf_text_config.model_type \
2510
+ == "deepseek_v3" or
2511
+ self.target_model_config.hf_text_config.model_type \
2512
+ == "mimo"):
2513
+ # use the draft model from the same model:
2514
+ self.model = self.target_model_config.model
2515
+ elif self.method in ("ngram", "[ngram]"):
2516
+ self.model = "ngram"
2517
+ else:
2518
+ raise ValueError("num_speculative_tokens was provided without "
2519
+ "speculative model.")
2520
+
2521
+ # Automatically configure the method for ngram when "model" is used
2522
+ # instead of "method"
2523
+ if self.method is None and (self.model is not None
2524
+ and self.model in ("ngram", "[ngram]")):
2525
+ self.method = "ngram"
2526
+
2527
+ if self.method in ("ngram", "[ngram]"):
2528
+ # Unified to "ngram" internally
2529
+ self.method = "ngram"
2530
+ # Set default values if not provided
2531
+ if (self.prompt_lookup_min is None
2532
+ and self.prompt_lookup_max is None):
2533
+ # TODO(woosuk): Tune these values. They are arbitrarily chosen.
2534
+ self.prompt_lookup_min = 5
2535
+ self.prompt_lookup_max = 5
2536
+ elif self.prompt_lookup_min is None:
2537
+ assert self.prompt_lookup_max is not None
2538
+ self.prompt_lookup_min = self.prompt_lookup_max
2539
+ elif self.prompt_lookup_max is None:
2540
+ assert self.prompt_lookup_min is not None
2541
+ self.prompt_lookup_max = self.prompt_lookup_min
2542
+
2543
+ # Validate values
2544
+ if self.prompt_lookup_min < 1:
2545
+ raise ValueError(
2546
+ f"prompt_lookup_min={self.prompt_lookup_min} must be > 0")
2547
+ if self.prompt_lookup_max < 1:
2548
+ raise ValueError(
2549
+ f"prompt_lookup_max={self.prompt_lookup_max} must be > 0")
2550
+ if self.prompt_lookup_min > self.prompt_lookup_max:
2551
+ raise ValueError(
2552
+ f"prompt_lookup_min={self.prompt_lookup_min} must "
2553
+ f"be <= prompt_lookup_max={self.prompt_lookup_max}")
2554
+
2555
+ # TODO: currently we still need to extract vocab_size from the target
2556
+ # model config; in the future, we may try to refactor it out and set
2557
+ # the draft-related config to None here.
2558
+ self.draft_model_config = self.target_model_config
2559
+ self.draft_parallel_config = self.target_parallel_config
2560
+ else:
2561
+ self.prompt_lookup_max = 0
2562
+ self.prompt_lookup_min = 0
2563
+
2564
+ if self.model is not None:
2565
+ self.draft_model_config = ModelConfig(
2566
+ model=self.model,
2567
+ task="draft",
2568
+ tokenizer=self.target_model_config.tokenizer,
2569
+ tokenizer_mode=self.target_model_config.tokenizer_mode,
2570
+ trust_remote_code=self.target_model_config.
2571
+ trust_remote_code,
2572
+ allowed_local_media_path=self.target_model_config.
2573
+ allowed_local_media_path,
2574
+ dtype=self.target_model_config.dtype,
2575
+ seed=self.target_model_config.seed,
2576
+ revision=self.revision,
2577
+ code_revision=self.code_revision,
2578
+ tokenizer_revision=self.target_model_config.
2579
+ tokenizer_revision,
2580
+ spec_target_max_model_len=self.target_model_config.
2581
+ max_model_len,
2582
+ quantization=self.quantization,
2583
+ enforce_eager=self.target_model_config.enforce_eager,
2584
+ max_seq_len_to_capture=self.target_model_config.
2585
+ max_seq_len_to_capture,
2586
+ max_logprobs=self.target_model_config.max_logprobs,
2587
+ hf_overrides=SpeculativeConfig.hf_config_override,
2588
+ )
2589
+
2590
+ # Automatically detect the method
2591
+ if self.method in ('eagle', 'eagle3'):
2592
+ pass
2593
+ elif "eagle-" in self.draft_model_config.model.lower() or \
2594
+ "eagle3-" in self.draft_model_config.model.lower():
2595
+ self.method = "eagle"
2596
+ elif self.draft_model_config.hf_config.model_type == "medusa":
2597
+ self.method = "medusa"
2598
+ elif (self.draft_model_config.hf_config.model_type ==
2599
+ "mlp_speculator"):
2600
+ self.method = "mlp_speculator"
2601
+ elif (self.draft_model_config.hf_config.model_type ==
2602
+ "deepseek_mtp"):
2603
+ self.method = "deepseek_mtp"
2604
+ if self.num_speculative_tokens > 1:
2605
+ logger.warning(
2606
+ "All Deepseek MTP models only have " \
2607
+ "one layer. Might need some code changes " \
2608
+ "to support multiple layers."
2609
+ )
2610
+ else:
2611
+ self.method = "draft_model"
2612
+
2613
+ # Replace hf_config for EAGLE draft_model
2614
+ if self.method in ("eagle", "eagle3"):
2615
+ if self.enable_chunked_prefill and not envs.VLLM_USE_V1:
2616
+ raise ValueError(
2617
+ "Chunked prefill and EAGLE are not compatible "
2618
+ "when using V0.")
2619
+
2620
+ from vllm.transformers_utils.configs.eagle import (
2621
+ EAGLEConfig)
2622
+ if isinstance(self.draft_model_config.hf_config,
2623
+ EAGLEConfig):
2624
+ pass
2625
+ else:
2626
+ eagle_config = EAGLEConfig(
2627
+ self.draft_model_config.hf_config,
2628
+ method=self.method,
2629
+ model_type="eagle")
2630
+ self.draft_model_config.hf_config = eagle_config
2631
+
2632
+ if (self.num_speculative_tokens is not None
2633
+ and hasattr(self.draft_model_config.hf_config,
2634
+ "num_lookahead_tokens")):
2635
+ self.draft_model_config.hf_config.num_lookahead_tokens = \
2636
+ self.num_speculative_tokens
2637
+
2638
+ n_predict = getattr(self.draft_model_config.hf_config,
2639
+ "n_predict", None)
2640
+ if n_predict is not None:
2641
+ if self.num_speculative_tokens is None:
2642
+ # Default to max value defined in draft model config.
2643
+ self.num_speculative_tokens = n_predict
2644
+ elif self.num_speculative_tokens > n_predict and \
2645
+ self.num_speculative_tokens % n_predict != 0:
2646
+ # Ensure divisibility for MTP module reuse.
2647
+ raise ValueError(
2648
+ f"num_speculative_tokens:{self.num_speculative_tokens}"
2649
+ f" must be divisible by {n_predict=}")
2650
+
2651
+ self.draft_tensor_parallel_size = \
2652
+ SpeculativeConfig._verify_and_get_draft_tp(
2653
+ self.target_parallel_config,
2654
+ self.draft_tensor_parallel_size,
2655
+ self.draft_model_config.hf_config
2656
+ )
2657
+
2658
+ self.draft_model_config.max_model_len = (
2659
+ SpeculativeConfig._maybe_override_draft_max_model_len(
2660
+ self.max_model_len,
2661
+ self.draft_model_config.max_model_len,
2662
+ self.target_model_config.max_model_len,
2663
+ ))
2664
+
2665
+ self.draft_parallel_config = (
2666
+ SpeculativeConfig.create_draft_parallel_config(
2667
+ self.target_parallel_config,
2668
+ self.draft_tensor_parallel_size))
2669
+
2670
+ if self.acceptance_method == "typical_acceptance_sampler":
2671
+ if self.posterior_threshold is None:
2672
+ self.posterior_threshold = 0.09
2673
+ if self.posterior_alpha is None:
2674
+ self.posterior_alpha = 0.3
2675
+
2676
+ self._verify_args()
2677
+
2678
+ @staticmethod
2679
+ def _maybe_override_draft_max_model_len(
2680
+ speculative_max_model_len: Optional[int],
2681
+ draft_max_model_len: int,
2682
+ target_max_model_len: int,
2683
+ ) -> int:
2684
+ """Determine the max sequence len for the draft model. This is usually
2685
+ the draft_max_model_len, but may be the target_max_model_len if it is
2686
+ less than the draft_max_model_len, or may be speculative_max_model_len
2687
+ if it is specified.
2688
+
2689
+ This is necessary so that sequences do not exceed the capacity of the
2690
+ draft model or the target model.
2691
+
2692
+ speculative_max_model_len is mainly used for testing that sequences can
2693
+ skip speculation.
2694
+ """
2695
+
2696
+ if speculative_max_model_len is not None:
2697
+
2698
+ if speculative_max_model_len > draft_max_model_len:
2699
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2700
+ f"larger than {draft_max_model_len=}")
2701
+
2702
+ if speculative_max_model_len > target_max_model_len:
2703
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2704
+ f"larger than {target_max_model_len=}")
2705
+
2706
+ return speculative_max_model_len
2707
+
2708
+ return min(
2709
+ draft_max_model_len,
2710
+ target_max_model_len,
2711
+ )
2712
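Two worked cases for the resolution logic above; the lengths are made up:

    # No explicit speculative_max_model_len: take the tighter of the two models.
    assert SpeculativeConfig._maybe_override_draft_max_model_len(
        None, draft_max_model_len=4096, target_max_model_len=8192) == 4096

    # An explicit value wins, as long as it does not exceed either model.
    assert SpeculativeConfig._maybe_override_draft_max_model_len(
        2048, draft_max_model_len=4096, target_max_model_len=8192) == 2048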
+
2713
+ @staticmethod
2714
+ def _verify_and_get_draft_tp(
2715
+ target_parallel_config: ParallelConfig,
2716
+ speculative_draft_tensor_parallel_size: Optional[int],
2717
+ draft_hf_config: PretrainedConfig) -> int:
2718
+ """
2719
+ Verifies and adjusts the tensor parallel size for a draft model
2720
+ specified using speculative_draft_tensor_parallel_size.
2721
+ """
2722
+ # If speculative_draft_tensor_parallel_size is unset then set it
2723
+ # appropriately else verify that it is set correctly.
2724
+ if speculative_draft_tensor_parallel_size is None:
2725
+ if draft_hf_config.model_type == "mlp_speculator":
2726
+ speculative_draft_tensor_parallel_size = 1
2727
+ if target_parallel_config.tensor_parallel_size > 1:
2728
+ logger.warning(
2729
+ "%s cannot currently be run with tp>1; "
2730
+ "setting speculative_draft_tensor_parallel_size=1",
2731
+ draft_hf_config.model_type)
2732
+ else:
2733
+ speculative_draft_tensor_parallel_size = \
2734
+ target_parallel_config.tensor_parallel_size
2735
+ elif speculative_draft_tensor_parallel_size not in (
2736
+ 1, target_parallel_config.tensor_parallel_size):
2737
+ raise ValueError(
2738
+ f"{speculative_draft_tensor_parallel_size=} cannot be "
2739
+ f"other value than 1 or target model tensor_parallel_size")
2740
+ return speculative_draft_tensor_parallel_size
2741
+
2742
+ @staticmethod
2743
+ def create_draft_parallel_config(
2744
+ target_parallel_config: ParallelConfig,
2745
+ speculative_draft_tensor_parallel_size: int,
2746
+ ) -> ParallelConfig:
2747
+ """Create a parallel config for use by the draft worker.
2748
+
2749
+ This is mostly a copy of the target parallel config, except the tp_size.
2750
+ """
2751
+ draft_parallel_config = ParallelConfig(
2752
+ pipeline_parallel_size=target_parallel_config.
2753
+ pipeline_parallel_size,
2754
+ tensor_parallel_size=speculative_draft_tensor_parallel_size,
2755
+ distributed_executor_backend=target_parallel_config.
2756
+ distributed_executor_backend,
2757
+ max_parallel_loading_workers=target_parallel_config.
2758
+ max_parallel_loading_workers,
2759
+ disable_custom_all_reduce=target_parallel_config.
2760
+ disable_custom_all_reduce,
2761
+ ray_workers_use_nsight=target_parallel_config.
2762
+ ray_workers_use_nsight,
2763
+ placement_group=target_parallel_config.placement_group,
2764
+ )
2765
+
2766
+ return draft_parallel_config
2767
+
2768
+ def _verify_args(self) -> None:
2769
+ if self.num_speculative_tokens is None:
2770
+ raise ValueError(
2771
+ "num_speculative_tokens must be provided with "
2772
+ "speculative model unless the draft model config contains an "
2773
+ "n_predict parameter.")
2774
+
2775
+ if self.num_speculative_tokens <= 0:
2776
+ raise ValueError("Expected num_speculative_tokens to be greater "
2777
+ f"than zero ({self.num_speculative_tokens}).")
2778
+
2779
+ if self.draft_model_config:
2780
+ self.draft_model_config.verify_with_parallel_config(
2781
+ self.draft_parallel_config)
2782
+ # Validate and set draft token acceptance related settings.
2783
+
2784
+ if self.acceptance_method is None:
2785
+ raise ValueError("acceptance_method is not set. "
2786
+ "Expected values are rejection_sampler or "
2787
+ "typical_acceptance_sampler.")
2788
+
2789
+ if (self.acceptance_method != 'rejection_sampler'
2790
+ and self.acceptance_method != 'typical_acceptance_sampler'):
2791
+ raise ValueError(
2792
+ "Expected acceptance_method to be either "
2793
+ "rejection_sampler or typical_acceptance_sampler. Instead it "
2794
+ f"is {self.acceptance_method}")
2795
+
2796
+ if self.acceptance_method == "typical_acceptance_sampler" and (
2797
+ (self.posterior_threshold is not None
2798
+ and self.posterior_threshold < 0) or
2799
+ (self.posterior_alpha is not None and self.posterior_alpha < 0)):
2800
+ raise ValueError(
2801
+ "Expected the posterior_threshold and posterior_alpha of "
2802
+ "typical_acceptance_sampler to be > 0. "
2803
+ "Instead found posterior_threshold = "
2804
+ f"{self.posterior_threshold} and posterior_alpha = "
2805
+ f"{self.posterior_alpha}")
2806
+
2807
+ if (self.disable_by_batch_size is not None
2808
+ and self.disable_by_batch_size < 2):
2809
+ raise ValueError("Expect the batch size threshold of disabling "
2810
+ "speculative decoding is > 1, but got "
2811
+ f"{self.disable_by_batch_size=}")
2812
+
2813
+ if self.method == "eagle3" and self.target_model_config and \
2814
+ "llama" not in self.target_model_config.hf_text_config.model_type:
2815
+ raise ValueError(
2816
+ "Eagle3 is only supported for Llama models. "
2817
+ f"Got {self.target_model_config.hf_text_config.model_type=}")
2818
+
2819
+ @property
2820
+ def num_lookahead_slots(self) -> int:
2821
+ """The number of additional slots the scheduler should allocate per
2822
+ step, in addition to the slots allocated for each known token.
2823
+
2824
+ This is equal to the number of speculative tokens, as each speculative
2825
+ token must be scored.
2826
+ """
2827
+ return self.num_speculative_tokens
2828
+
2829
+ def use_eagle(self) -> bool:
2830
+ return self.method in ("eagle", "eagle3", "deepseek_mtp")
2831
+
2832
+ def __repr__(self) -> str:
2833
+ method = self.method
2834
+ model = None if method == "ngram" else self.draft_model_config.model
2835
+ num_spec_tokens = self.num_speculative_tokens
2836
+ return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"
2837
+
2838
+
2839
+ LoRADType = Literal["auto", "float16", "bfloat16"]
2840
+
2841
+
2842
+ @config
2843
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
2844
+ class LoRAConfig:
2845
+ """Configuration for LoRA."""
2846
+
2847
+ max_lora_rank: int = 16
2848
+ """Max LoRA rank."""
2849
+ max_loras: int = 1
2850
+ """Max number of LoRAs in a single batch."""
2851
+ fully_sharded_loras: bool = False
2852
+ """By default, only half of the LoRA computation is sharded with tensor
2853
+ parallelism. Enabling this will use the fully sharded layers. At high
2854
+ sequence length, max rank or tensor parallel size, this is likely faster.
2855
+ """
2856
+ max_cpu_loras: Optional[int] = None
2857
+ """Maximum number of LoRAs to store in CPU memory. Must be >= than
2858
+ `max_loras`."""
2859
+ lora_dtype: Union[torch.dtype, LoRADType] = "auto"
2860
+ """Data type for LoRA. If auto, will default to base model dtype."""
2861
+ lora_extra_vocab_size: int = 256
2862
+ """Maximum size of extra vocabulary that can be present in a LoRA adapter
2863
+ (added to the base model vocabulary)."""
2864
+ lora_vocab_padding_size: ClassVar[int] = current_platform\
2865
+ .get_lora_vocab_padding_size()
2866
+ long_lora_scaling_factors: Optional[tuple[float, ...]] = None
2867
+ """Specify multiple scaling factors (which can be different from base model
2868
+ scaling factor - see e.g. Long LoRA) to allow for multiple LoRA adapters
2869
+ trained with those scaling factors to be used at the same time. If not
2870
+ specified, only adapters trained with the base model scaling factor are
2871
+ allowed."""
2872
+ bias_enabled: bool = False
2873
+ """Enable bias for LoRA adapters."""
2874
+
2875
+ def compute_hash(self) -> str:
2876
+ """
2877
+ WARNING: Whenever a new field is added to this config,
2878
+ ensure that it is included in the factors list if
2879
+ it affects the computation graph.
2880
+
2881
+ Provide a hash that uniquely identifies all the configs
2882
+ that affect the structure of the computation
2883
+ graph from input ids/embeddings to the final hidden states,
2884
+ excluding anything before input ids/embeddings and after
2885
+ the final hidden states.
2886
+ """
2887
+ factors: list[Any] = []
2888
+ factors.append(self.max_lora_rank)
2889
+ factors.append(self.max_loras)
2890
+ factors.append(self.fully_sharded_loras)
2891
+ factors.append(self.lora_dtype)
2892
+ factors.append(self.lora_extra_vocab_size)
2893
+ factors.append(self.lora_vocab_padding_size)
2894
+ factors.append(self.long_lora_scaling_factors)
2895
+ factors.append(self.bias_enabled)
2896
+ hash_str = hashlib.md5(str(factors).encode(),
2897
+ usedforsecurity=False).hexdigest()
2898
+ return hash_str
2899
+
2900
+ def __post_init__(self):
2901
+ # Setting the maximum rank to 512 should be able to satisfy the vast
2902
+ # majority of applications.
2903
+ possible_max_ranks = (8, 16, 32, 64, 128, 256, 320, 512)
2904
+ possible_lora_extra_vocab_size = (256, 512)
2905
+ if self.max_lora_rank not in possible_max_ranks:
2906
+ raise ValueError(
2907
+ f"max_lora_rank ({self.max_lora_rank}) must be one of "
2908
+ f"{possible_max_ranks}.")
2909
+ if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
2910
+ raise ValueError(
2911
+ f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
2912
+ f"must be one of {possible_lora_extra_vocab_size}.")
2913
+ if self.max_loras < 1:
2914
+ raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
2915
+ if self.max_cpu_loras is None:
2916
+ self.max_cpu_loras = self.max_loras
2917
+ elif self.max_cpu_loras < self.max_loras:
2918
+ raise ValueError(
2919
+ f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
2920
+ f"max_loras ({self.max_loras})")
2921
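A hedged sketch of a LoRA config that passes the checks above (the rank must be one of the allowed values, and `max_cpu_loras` must be at least `max_loras`):

    lora_cfg = LoRAConfig(max_lora_rank=64, max_loras=4, max_cpu_loras=8)
    assert lora_cfg.max_cpu_loras == 8

    # Leaving max_cpu_loras unset makes it default to max_loras.
    assert LoRAConfig(max_lora_rank=16, max_loras=2).max_cpu_loras == 2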
+
2922
+ def verify_with_cache_config(self, cache_config: CacheConfig):
2923
+ if cache_config.cpu_offload_gb > 0 and not envs.VLLM_USE_V1:
2924
+ raise ValueError(
2925
+ "V0 LoRA does not support CPU offload, please use V1.")
2926
+
2927
+ def verify_with_model_config(self, model_config: ModelConfig):
2928
+ if self.lora_dtype in (None, "auto"):
2929
+ self.lora_dtype = model_config.dtype
2930
+ elif isinstance(self.lora_dtype, str):
2931
+ self.lora_dtype = getattr(torch, self.lora_dtype)
2932
+
2933
+ def verify_lora_support(self):
2934
+ if self.long_lora_scaling_factors is not None and envs.VLLM_USE_V1:
2935
+ raise ValueError(
2936
+ "V1 LoRA does not support long LoRA, please use V0.")
2937
+
2938
+
2939
+ @config
2940
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
2941
+ class PromptAdapterConfig:
2942
+ """Configuration for PromptAdapters."""
2943
+
2944
+ max_prompt_adapters: int = 1
2945
+ """Max number of PromptAdapters in a batch."""
2946
+ max_prompt_adapter_token: int = 0
2947
+ """Max number of PromptAdapters tokens."""
2948
+ max_cpu_prompt_adapters: Optional[int] = None
2949
+ """Maximum number of PromptAdapters to store in CPU memory. Must be >= than
2950
+ `max_prompt_adapters`."""
2951
+ prompt_adapter_dtype: Union[torch.dtype, str] = "auto"
2952
+ """Data type for PromptAdapter. If auto, will default to base model dtype.
2953
+ """
2954
+
2955
+ def compute_hash(self) -> str:
2956
+ """
2957
+ WARNING: Whenever a new field is added to this config,
2958
+ ensure that it is included in the factors list if
2959
+ it affects the computation graph.
2960
+
2961
+ Provide a hash that uniquely identifies all the configs
2962
+ that affect the structure of the computation
2963
+ graph from input ids/embeddings to the final hidden states,
2964
+ excluding anything before input ids/embeddings and after
2965
+ the final hidden states.
2966
+ """
2967
+ # no factors to consider.
2968
+ # this config will not affect the computation graph.
2969
+ factors: list[Any] = []
2970
+ hash_str = hashlib.md5(str(factors).encode(),
2971
+ usedforsecurity=False).hexdigest()
2972
+ return hash_str
2973
+
2974
+ def __post_init__(self):
2975
+
2976
+ if self.max_prompt_adapters < 1:
2977
+ raise ValueError(f"max_prompt_adapters "
2978
+ f"({self.max_prompt_adapters}) must be >= 1.")
2979
+ if self.max_prompt_adapter_token == 0:
2980
+ raise ValueError("max_prompt_adapter_token must be set.")
2981
+ if self.max_cpu_prompt_adapters is None:
2982
+ self.max_cpu_prompt_adapters = self.max_prompt_adapters
2983
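A hedged sketch of the validation above; note that `max_prompt_adapter_token` must be non-zero or `__post_init__` raises:

    pa_cfg = PromptAdapterConfig(max_prompt_adapters=2, max_prompt_adapter_token=64)
    # max_cpu_prompt_adapters falls back to max_prompt_adapters when unset.
    assert pa_cfg.max_cpu_prompt_adapters == 2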
+
2984
+ def verify_with_model_config(self, model_config: ModelConfig):
2985
+ if self.prompt_adapter_dtype == "auto":
2986
+ self.prompt_adapter_dtype = model_config.dtype
2987
+ elif isinstance(self.prompt_adapter_dtype, str):
2988
+ self.prompt_adapter_dtype = getattr(torch,
2989
+ self.prompt_adapter_dtype)
2990
+
2991
+
2992
+ @config
2993
+ @dataclass
2994
+ class MultiModalConfig:
2995
+ """Controls the behavior of multimodal models."""
2996
+
2997
+ limit_per_prompt: dict[str, int] = \
2998
+ cast(dict[str, int], get_field(ModelConfig, "limit_mm_per_prompt"))
2999
+ """
3000
+ The maximum number of input items allowed per prompt for each modality.
3001
+ Defaults to 1 (V0) or 999 (V1) for each modality.
3002
+
3003
+ For example, to allow up to 16 images and 2 videos per prompt:
3004
+ `{"images": 16, "videos": 2}`
3005
+ """
3006
+
3007
+ mm_processor_kwargs: Optional[dict[str, object]] = None
3008
+ """
3009
+ Overrides for the multi-modal processor obtained from
3010
+ `transformers.AutoProcessor.from_pretrained`.
3011
+
3012
+ The available overrides depend on the model that is being run.
3013
+
3014
+ For example, for Phi-3-Vision:
3015
+ `{"num_crops": 4}`.
3016
+ """
3017
+
3018
+ disable_mm_preprocessor_cache: bool = False
3019
+ """
3020
+ If `True`, disable caching of the processed multi-modal inputs.
3021
+ """
3022
+
3023
+ def compute_hash(self) -> str:
3024
+ """
3025
+ WARNING: Whenever a new field is added to this config,
3026
+ ensure that it is included in the factors list if
3027
+ it affects the computation graph.
3028
+
3029
+ Provide a hash that uniquely identifies all the configs
3030
+ that affect the structure of the computation
3031
+ graph from input ids/embeddings to the final hidden states,
3032
+ excluding anything before input ids/embeddings and after
3033
+ the final hidden states.
3034
+ """
3035
+ # no factors to consider.
3036
+ # this config will not affect the computation graph.
3037
+ factors: list[Any] = []
3038
+ hash_str = hashlib.md5(str(factors).encode(),
3039
+ usedforsecurity=False).hexdigest()
3040
+ return hash_str
3041
+
3042
+ def get_limit_per_prompt(self, modality: str) -> int:
3043
+ """
3044
+ Get the maximum number of input items allowed per prompt
3045
+ for the given modality.
3046
+ """
3047
+ return self.limit_per_prompt.get(
3048
+ modality,
3049
+ 999 if envs.VLLM_USE_V1 else 1,
3050
+ )
3051
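A hedged sketch of the per-modality lookup above; the modality keys mirror the docstring example and the values are illustrative:

    mm_cfg = MultiModalConfig(limit_per_prompt={"images": 16, "videos": 2})
    assert mm_cfg.get_limit_per_prompt("images") == 16
    # Unlisted modalities fall back to 999 on V1 or 1 on V0.
    default_limit = mm_cfg.get_limit_per_prompt("audio")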
+
3052
+ # TODO: Add configs to init vision tower or not.
3053
+
3054
+
3055
+ @config
3056
+ @dataclass
3057
+ class PoolerConfig:
3058
+ """Controls the behavior of output pooling in pooling models."""
3059
+
3060
+ pooling_type: Optional[str] = None
3061
+ """
3062
+ The pooling method of the pooling model. This should be a key in
3063
+ [`vllm.model_executor.layers.pooler.PoolingType`][].
3064
+ """
3065
+
3066
+ normalize: Optional[bool] = None
3067
+ """
3068
+ Whether to normalize the pooled outputs. Usually, this should be set to
3069
+ ``True`` for embedding outputs.
3070
+ """
3071
+
3072
+ softmax: Optional[bool] = None
3073
+ """
3074
+ Whether to apply softmax to the pooled outputs. Usually, this should be set
3075
+ to ``True`` for classification outputs.
3076
+ """
3077
+
3078
+ step_tag_id: Optional[int] = None
3079
+ """
3080
+ If set, only the score corresponding to the ``step_tag_id`` in the
3081
+ generated sentence should be returned. Otherwise, the scores for all tokens
3082
+ are returned.
3083
+ """
3084
+
3085
+ returned_token_ids: Optional[list[int]] = None
3086
+ """
3087
+ A list of indices for the vocabulary dimensions to be extracted,
3088
+ such as the token IDs of ``good_token`` and ``bad_token`` in the
3089
+ ``math-shepherd-mistral-7b-prm`` model.
3090
+ """
3091
+
3092
+ def compute_hash(self) -> str:
3093
+ """
3094
+ WARNING: Whenever a new field is added to this config,
3095
+ ensure that it is included in the factors list if
3096
+ it affects the computation graph.
3097
+
3098
+ Provide a hash that uniquely identifies all the configs
3099
+ that affect the structure of the computation
3100
+ graph from input ids/embeddings to the final hidden states,
3101
+ excluding anything before input ids/embeddings and after
3102
+ the final hidden states.
3103
+ """
3104
+ # no factors to consider.
3105
+ # this config will not affect the computation graph.
3106
+ factors: list[Any] = []
3107
+ hash_str = hashlib.md5(str(factors).encode(),
3108
+ usedforsecurity=False).hexdigest()
3109
+ return hash_str
3110
+
3111
+
3112
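# Editor's illustrative sketch (not part of the package diff): typical field
# choices for the two pooling use cases described above. The stand-in dataclass
# mirrors a subset of PoolerConfig so the snippet runs on its own; the
# pooling_type strings are example values, not defaults.
from dataclasses import dataclass
from typing import Optional

@dataclass
class PoolerConfigSketch:
    pooling_type: Optional[str] = None
    normalize: Optional[bool] = None
    softmax: Optional[bool] = None

embedding_cfg = PoolerConfigSketch(pooling_type="MEAN", normalize=True)
classification_cfg = PoolerConfigSketch(pooling_type="LAST", softmax=True)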
+ _STR_DTYPE_TO_TORCH_DTYPE = {
3113
+ "half": torch.float16,
3114
+ "float16": torch.float16,
3115
+ "float": torch.float32,
3116
+ "float32": torch.float32,
3117
+ "bfloat16": torch.bfloat16,
3118
+ }
3119
+
3120
+ # model_type -> reason
3121
+ _FLOAT16_NOT_SUPPORTED_MODELS = {
3122
+ "gemma2": "Numerical instability. Please use bfloat16 or float32 instead.",
3123
+ "gemma3": "Numerical instability. Please use bfloat16 or float32 instead.",
3124
+ "plamo2": "Numerical instability. Please use bfloat16 or float32 instead.",
3125
+ "glm4": "Numerical instability. Please use bfloat16 or float32 instead.",
3126
+ }
3127
+
3128
+
3129
+ def _is_valid_dtype(model_type: str, dtype: torch.dtype):
3130
+ if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16: # noqa: E501, SIM103
3131
+ return False
3132
+
3133
+ return True
3134
+
3135
+
3136
+ def _check_valid_dtype(model_type: str, dtype: torch.dtype):
3137
+ if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:
3138
+ reason = _FLOAT16_NOT_SUPPORTED_MODELS[model_type]
3139
+ raise ValueError(f"The model type {model_type!r} "
3140
+ f"does not support float16. Reason: {reason}")
3141
+
3142
+ return True
3143
+
3144
+
3145
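# Editor's illustrative sketch (not part of the package diff): the float16
# guard above, restated as a standalone function so it runs without importing
# this module. The blocklist below is a subset of _FLOAT16_NOT_SUPPORTED_MODELS.
import torch

_FP16_BLOCKLIST = {"gemma2": "Numerical instability."}

def check_valid_dtype_sketch(model_type: str, dtype: torch.dtype) -> bool:
    if model_type in _FP16_BLOCKLIST and dtype == torch.float16:
        raise ValueError(f"{model_type!r} does not support float16: "
                         f"{_FP16_BLOCKLIST[model_type]}")
    return True

assert check_valid_dtype_sketch("gemma2", torch.bfloat16)
try:
    check_valid_dtype_sketch("gemma2", torch.float16)
except ValueError:
    pass  # expected: gemma2 with float16 is rejected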
+ def _find_dtype(
3146
+ model_id: str,
3147
+ config: PretrainedConfig,
3148
+ *,
3149
+ revision: Optional[str],
3150
+ ):
3151
+ # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
3152
+ # because config.torch_dtype can be None.
3153
+ config_dtype = getattr(config, "torch_dtype", None)
3154
+
3155
+ # Fallbacks for multi-modal models if the root config
3156
+ # does not define torch_dtype
3157
+ if config_dtype is None:
3158
+ config_dtype = getattr(config.get_text_config(), "torch_dtype", None)
3159
+ if config_dtype is None and hasattr(config, "vision_config"):
3160
+ config_dtype = getattr(config.vision_config, "torch_dtype", None)
3161
+ if config_dtype is None and hasattr(config, "encoder_config"):
3162
+ config_dtype = getattr(config.encoder_config, "torch_dtype", None)
3163
+
3164
+ # Try to read the dtype of the weights if they are in safetensors format
3165
+ if config_dtype is None:
3166
+ repo_mt = try_get_safetensors_metadata(model_id, revision=revision)
3167
+
3168
+ if repo_mt and (files_mt := repo_mt.files_metadata):
3169
+ param_dtypes: set[torch.dtype] = {
3170
+ _SAFETENSORS_TO_TORCH_DTYPE[dtype_str]
3171
+ for file_mt in files_mt.values()
3172
+ for dtype_str in file_mt.parameter_count
3173
+ if dtype_str in _SAFETENSORS_TO_TORCH_DTYPE
3174
+ }
3175
+
3176
+ if param_dtypes:
3177
+ return common_broadcastable_dtype(param_dtypes)
3178
+
3179
+ if config_dtype is None:
3180
+ config_dtype = torch.float32
3181
+
3182
+ return config_dtype
3183
+
3184
+
3185
+ def _resolve_auto_dtype(
3186
+ model_type: str,
3187
+ config_dtype: torch.dtype,
3188
+ *,
3189
+ is_pooling_model: bool,
3190
+ ):
3191
+ from vllm.platforms import current_platform
3192
+
3193
+ supported_dtypes = [
3194
+ dtype for dtype in current_platform.supported_dtypes
3195
+ if _is_valid_dtype(model_type, dtype)
3196
+ ]
3197
+
3198
+ if is_pooling_model and torch.float16 in supported_dtypes:
3199
+ preferred_dtype = torch.float16
3200
+ else:
3201
+ preferred_dtype = supported_dtypes[0]
3202
+
3203
+ # Downcast for float32 models
3204
+ if config_dtype == torch.float32:
3205
+ config_dtype = preferred_dtype
3206
+
3207
+ if config_dtype in supported_dtypes:
3208
+ return config_dtype
3209
+
3210
+ # Ensure device compatibility
3211
+ device_name = current_platform.get_device_name()
3212
+ device_capability = current_platform.get_device_capability()
3213
+
3214
+ if device_capability is None:
3215
+ device_str = f"{device_name!r}"
3216
+ else:
3217
+ version_str = device_capability.as_version_str()
3218
+ device_str = f"{device_name!r} (with compute capability {version_str})"
3219
+
3220
+ logger.warning(
3221
+ "Your device %s doesn't support %s. "
3222
+ "Falling back to %s for compatibility.",
3223
+ device_str,
3224
+ config_dtype,
3225
+ preferred_dtype,
3226
+ )
3227
+
3228
+ return preferred_dtype
3229
+
3230
+
3231
+ def _get_and_verify_dtype(
3232
+ model_id: str,
3233
+ config: PretrainedConfig,
3234
+ dtype: Union[str, torch.dtype],
3235
+ *,
3236
+ is_pooling_model: bool,
3237
+ revision: Optional[str] = None,
3238
+ ) -> torch.dtype:
3239
+ config_dtype = _find_dtype(model_id, config, revision=revision)
3240
+ model_type = config.model_type
3241
+
3242
+ if isinstance(dtype, str):
3243
+ dtype = dtype.lower()
3244
+ if dtype == "auto":
3245
+ # Set default dtype from model config
3246
+ torch_dtype = _resolve_auto_dtype(
3247
+ model_type,
3248
+ config_dtype,
3249
+ is_pooling_model=is_pooling_model,
3250
+ )
3251
+ else:
3252
+ if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
3253
+ raise ValueError(f"Unknown dtype: {dtype!r}")
3254
+ torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
3255
+ elif isinstance(dtype, torch.dtype):
3256
+ torch_dtype = dtype
3257
+ else:
3258
+ raise ValueError(f"Unknown dtype: {dtype}")
3259
+
3260
+ _check_valid_dtype(model_type, torch_dtype)
3261
+
3262
+ if torch_dtype != config_dtype:
3263
+ if torch_dtype == torch.float32:
3264
+ # Upcasting to float32 is allowed.
3265
+ logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
3266
+ elif config_dtype == torch.float32:
3267
+ # Downcasting from float32 to float16 or bfloat16 is allowed.
3268
+ logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
3269
+ else:
3270
+ # Casting between float16 and bfloat16 is allowed with a warning.
3271
+ logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
3272
+
3273
+ return torch_dtype
3274
+
3275
+
3276
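# Editor's illustrative sketch (not part of the package diff): the casting
# rules that _get_and_verify_dtype applies once the requested dtype is known.
# This is a simplified standalone restatement, not the actual helper.
import torch

def casting_note(config_dtype: torch.dtype, requested: torch.dtype) -> str:
    if requested == config_dtype:
        return "no cast"
    if requested == torch.float32:
        return "upcast (allowed, logged at info level)"
    if config_dtype == torch.float32:
        return "downcast (allowed, logged at info level)"
    return "float16 <-> bfloat16 cast (allowed, logged as a warning)"

assert casting_note(torch.float32, torch.bfloat16).startswith("downcast")
assert casting_note(torch.bfloat16, torch.float16).startswith("float16")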
+ def _get_and_verify_max_len(
3277
+ hf_config: PretrainedConfig,
3278
+ max_model_len: Optional[int],
3279
+ disable_sliding_window: bool,
3280
+ sliding_window_len: Optional[Union[int, list[Optional[int]]]],
3281
+ spec_target_max_model_len: Optional[int] = None,
3282
+ encoder_config: Optional[Any] = None,
3283
+ ) -> int:
3284
+ """Get and verify the model's maximum length."""
3285
+ derived_max_model_len = float("inf")
3286
+ possible_keys = [
3287
+ # OPT
3288
+ "max_position_embeddings",
3289
+ # GPT-2
3290
+ "n_positions",
3291
+ # MPT
3292
+ "max_seq_len",
3293
+ # ChatGLM2
3294
+ "seq_length",
3295
+ # Command-R
3296
+ "model_max_length",
3297
+ # Whisper
3298
+ "max_target_positions",
3299
+ # Others
3300
+ "max_sequence_length",
3301
+ "max_seq_length",
3302
+ "seq_len",
3303
+ ]
3304
+ # Choose the smallest "max_length" from the possible keys.
3305
+ max_len_key = None
3306
+ for key in possible_keys:
3307
+ max_len = getattr(hf_config, key, None)
3308
+ if max_len is not None:
3309
+ max_len_key = key if max_len < derived_max_model_len \
3310
+ else max_len_key
3311
+ derived_max_model_len = min(derived_max_model_len, max_len)
3312
+ # For Command-R / Cohere, Cohere2 / Aya Vision models
3313
+ if tmp_max_len := getattr(hf_config, "model_max_length", None):
3314
+ max_len_key = "model_max_length"
3315
+ derived_max_model_len = tmp_max_len
3316
+
3317
+ # If sliding window is manually disabled, max_length should be less
3318
+ # than the sliding window length in the model config.
3319
+ if disable_sliding_window and sliding_window_len is not None:
3320
+
3321
+ sliding_window_len_min = get_min_sliding_window(sliding_window_len)
3322
+ max_len_key = "sliding_window" \
3323
+ if sliding_window_len_min < derived_max_model_len else max_len_key
3324
+ derived_max_model_len = min(derived_max_model_len,
3325
+ sliding_window_len_min)
3326
+
3327
+ # If none of the keys were found in the config, use a default and
3328
+ # log a warning.
3329
+ if derived_max_model_len == float("inf"):
3330
+ if max_model_len is not None:
3331
+ # If max_model_len is specified, we use it.
3332
+ return max_model_len
3333
+
3334
+ if spec_target_max_model_len is not None:
3335
+ # If this is a speculative draft model, we use the max model len
3336
+ # from the target model.
3337
+ return spec_target_max_model_len
3338
+
3339
+ default_max_len = 2048
3340
+ logger.warning(
3341
+ "The model's config.json does not contain any of the following "
3342
+ "keys to determine the original maximum length of the model: "
3343
+ "%s. Assuming the model's maximum length is %d.", possible_keys,
3344
+ default_max_len)
3345
+ derived_max_model_len = default_max_len
3346
+
3347
+ rope_scaling = getattr(hf_config, "rope_scaling", None)
3348
+ # NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
3349
+ # scaling, so we skip applying the scaling factor again.
3350
+ if rope_scaling is not None and "gemma3" not in hf_config.model_type:
3351
+ # No need to consider "type" key because of patch_rope_scaling when
3352
+ # loading HF config
3353
+ rope_type = rope_scaling["rope_type"]
3354
+
3355
+ if rope_type not in ("su", "longrope", "llama3"):
3356
+ if disable_sliding_window:
3357
+ # TODO(robertgshaw): Find a model that supports rope_scaling
3358
+ # with sliding window to see if this case should be allowed.
3359
+ raise NotImplementedError(
3360
+ "Disabling sliding window is not supported for models "
3361
+ "with rope_scaling. Please raise an issue so we can "
3362
+ "investigate.")
3363
+
3364
+ # NOTE: rope_type == "default" does not define factor
3365
+ # https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
3366
+ scaling_factor = rope_scaling.get("factor", 1.0)
3367
+
3368
+ if rope_type == "yarn":
3369
+ derived_max_model_len = rope_scaling[
3370
+ "original_max_position_embeddings"]
3371
+ derived_max_model_len *= scaling_factor
3372
+
3373
+ if encoder_config and "max_seq_length" in encoder_config:
3374
+ derived_max_model_len = encoder_config["max_seq_length"]
3375
+
3376
+ # If the user specified a max length, make sure it is smaller than the
3377
+ # derived length from the HF model config.
3378
+ if max_model_len is None:
3379
+ max_model_len = int(derived_max_model_len)
3380
+ if current_platform.is_tpu():
3381
+ logger.warning(
3382
+ "--max-model-len is not specified, "
3383
+ "it's currently using model's default length %s, "
3384
+ "which might be too large."
3385
+ "Please input with --max-model-len based on your "
3386
+ "request input length and output length, to avoid "
3387
+ "unnecessary degradation.", max_model_len)
3388
+ elif max_model_len > derived_max_model_len:
3389
+ # Some models might have a separate key for specifying model_max_length
3390
+ # that will be bigger than derived_max_model_len. We compare user input
3391
+ # with model_max_length and allow this override when it's smaller.
3392
+ model_max_length = getattr(hf_config, "model_max_length", None)
3393
+ if model_max_length is not None and max_model_len <= model_max_length:
3394
+ if disable_sliding_window:
3395
+ # TODO(robertgshaw): Find a model that has model_max_length
3396
+ # with sliding window to see if this case should be allowed.
3397
+ raise NotImplementedError(
3398
+ "Disabling sliding window is not supported for models "
3399
+ "model_max_length in the config. Please raise an issue "
3400
+ "so we can investigate.")
3401
+ else:
3402
+ msg = (
3403
+ f"User-specified max_model_len ({max_model_len}) is greater "
3404
+ f"than the derived max_model_len ({max_len_key}="
3405
+ f"{derived_max_model_len} or model_max_length="
3406
+ f"{model_max_length} in model's config.json). This may lead "
3407
+ "to incorrect model outputs or CUDA errors.")
3408
+ if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
3409
+ logger.warning(
3410
+ "%s Make sure the value is correct and within the "
3411
+ "model context size.", msg)
3412
+ else:
3413
+ raise ValueError(
3414
+ f"{msg} To allow overriding this maximum, set "
3415
+ "the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1")
3416
+ return int(max_model_len)
3417
+
3418
+
3419
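# Editor's illustrative sketch (not part of the package diff): how a YaRN
# rope_scaling entry changes the derived max length in _get_and_verify_max_len.
# The numbers below are hypothetical, not taken from any real model config.
rope_scaling = {
    "rope_type": "yarn",
    "factor": 4.0,
    "original_max_position_embeddings": 8192,
}
derived_max_model_len = 8192  # e.g. from max_position_embeddings
if rope_scaling["rope_type"] == "yarn":
    derived_max_model_len = rope_scaling["original_max_position_embeddings"]
derived_max_model_len *= rope_scaling.get("factor", 1.0)
assert derived_max_model_len == 32768  # 8192 * 4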
+ def get_min_sliding_window(
3420
+ sliding_window: Union[int, list[Optional[int]]]) -> int:
3421
+ if isinstance(sliding_window, list):
3422
+ return min(s for s in sliding_window if s is not None)
3423
+
3424
+ return sliding_window
3425
+
3426
+
3427
+ def get_served_model_name(model: str,
3428
+ served_model_name: Optional[Union[str, list[str]]]):
3429
+ """
3430
+ If the input is a non-empty list, the first model_name in
3431
+ `served_model_name` is taken.
3432
+ If the input is a non-empty string, it is used directly.
3433
+ For cases where the input is either an empty string or an
3434
+ empty list, the fallback is to use `self.model`.
3435
+ """
3436
+ if not served_model_name:
3437
+ return model
3438
+ if isinstance(served_model_name, list):
3439
+ return served_model_name[0]
3440
+ return served_model_name
3441
+
3442
+
3443
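# Editor's illustrative sketch (not part of the package diff): the fallback
# behaviour of get_served_model_name, restated standalone. The model name is
# only an example string.
def served_name(model, served_model_name):
    if not served_model_name:  # None, "" or [] -> fall back to `model`
        return model
    if isinstance(served_model_name, list):
        return served_model_name[0]  # the first entry wins
    return served_model_name

assert served_name("facebook/opt-125m", None) == "facebook/opt-125m"
assert served_name("facebook/opt-125m", ["alias-a", "alias-b"]) == "alias-a"
assert served_name("facebook/opt-125m", "alias") == "alias"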
+ GuidedDecodingBackendV0 = Literal["auto", "outlines", "lm-format-enforcer",
3444
+ "xgrammar", "guidance"]
3445
+ GuidedDecodingBackendV1 = Literal["auto", "xgrammar", "guidance"]
3446
+ GuidedDecodingBackend = Literal[GuidedDecodingBackendV0,
3447
+ GuidedDecodingBackendV1]
3448
+
3449
+
3450
+ @config
3451
+ @dataclass
3452
+ class DecodingConfig:
3453
+ """Dataclass which contains the decoding strategy of the engine."""
3454
+
3455
+ @property
3456
+ @deprecated(
3457
+ "`guided_decoding_backend` is deprecated and has been renamed to "
3458
+ "`backend`. This will be removed in v0.10.0. Please use the "
3459
+ "`backend` argument instead.")
3460
+ def guided_decoding_backend(self) -> GuidedDecodingBackend:
3461
+ return self.backend
3462
+
3463
+ @guided_decoding_backend.setter
3464
+ def guided_decoding_backend(self, value: GuidedDecodingBackend):
3465
+ self.backend = value
3466
+
3467
+ backend: GuidedDecodingBackend = "auto" if envs.VLLM_USE_V1 else "xgrammar"
3468
+ """Which engine will be used for guided decoding (JSON schema / regex etc)
3469
+ by default. With "auto", we will make opinionated choices based on request
3470
+ contents and what the backend libraries currently support, so the behavior
3471
+ is subject to change in each release."""
3472
+
3473
+ disable_fallback: bool = False
3474
+ """If `True`, vLLM will not fallback to a different backend on error."""
3475
+
3476
+ disable_any_whitespace: bool = False
3477
+ """If `True`, the model will not generate any whitespace during guided
3478
+ decoding. This is only supported for xgrammar and guidance backends."""
3479
+
3480
+ disable_additional_properties: bool = False
3481
+ """If `True`, the `guidance` backend will not use `additionalProperties`
3482
+ in the JSON schema. This is only supported for the `guidance` backend and
3483
+ is used to better align its behaviour with `outlines` and `xgrammar`."""
3484
+
3485
+ reasoning_backend: str = ""
3486
+ """Select the reasoning parser depending on the model that you're using.
3487
+ This is used to parse the reasoning content into OpenAI API format."""
3488
+
3489
+ def compute_hash(self) -> str:
3490
+ """
3491
+ WARNING: Whenever a new field is added to this config,
3492
+ ensure that it is included in the factors list if
3493
+ it affects the computation graph.
3494
+
3495
+ Provide a hash that uniquely identifies all the configs
3496
+ that affect the structure of the computation
3497
+ graph from input ids/embeddings to the final hidden states,
3498
+ excluding anything before input ids/embeddings and after
3499
+ the final hidden states.
3500
+ """
3501
+ # no factors to consider.
3502
+ # this config will not affect the computation graph.
3503
+ factors: list[Any] = []
3504
+ hash_str = hashlib.md5(str(factors).encode(),
3505
+ usedforsecurity=False).hexdigest()
3506
+ return hash_str
3507
+
3508
+ def __post_init__(self):
3509
+ if ":" in self.backend:
3510
+ self._extract_backend_options()
3511
+
3512
+ if envs.VLLM_USE_V1:
3513
+ valid_guided_backends = get_args(GuidedDecodingBackendV1)
3514
+ else:
3515
+ valid_guided_backends = get_args(GuidedDecodingBackendV0)
3516
+ if self.backend not in valid_guided_backends:
3517
+ raise ValueError(f"Invalid backend '{self.backend}',"
3518
+ f" must be one of {valid_guided_backends}")
3519
+ if (self.disable_any_whitespace
3520
+ and self.backend not in ("xgrammar", "guidance")):
3521
+ raise ValueError("disable_any_whitespace is only supported for "
3522
+ "xgrammar and guidance backends.")
3523
+ if (self.disable_additional_properties and self.backend != "guidance"):
3524
+ raise ValueError("disable_additional_properties is only supported "
3525
+ "for the guidance backend.")
3526
+
3527
+ @deprecated(
3528
+ "Passing guided decoding backend options inside backend in the format "
3529
+ "'backend:...' is deprecated. This will be removed in v0.10.0. Please "
3530
+ "use the dedicated arguments '--disable-fallback', "
3531
+ "'--disable-any-whitespace' and '--disable-additional-properties' "
3532
+ "instead.")
3533
+ def _extract_backend_options(self):
3534
+ """Extract backend options from the backend string."""
3535
+ backend, options = self.backend.split(":")
3536
+ self.backend = cast(GuidedDecodingBackend, backend)
3537
+ options_set = set(options.strip().split(","))
3538
+ if "no-fallback" in options_set:
3539
+ self.disable_fallback = True
3540
+ if "disable-any-whitespace" in options_set:
3541
+ self.disable_any_whitespace = True
3542
+ if "no-additional-properties" in options_set:
3543
+ self.disable_additional_properties = True
3544
+
3545
+
3546
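# Editor's illustrative sketch (not part of the package diff): the deprecated
# "backend:option,..." form that _extract_backend_options still accepts, next
# to the dedicated flags that replace it. Restated standalone; the real parser
# uses split(":") on the DecodingConfig instance itself.
def parse_backend(value: str) -> dict:
    backend, _, options = value.partition(":")
    opts = set(options.split(",")) if options else set()
    return {
        "backend": backend,
        "disable_fallback": "no-fallback" in opts,
        "disable_any_whitespace": "disable-any-whitespace" in opts,
        "disable_additional_properties": "no-additional-properties" in opts,
    }

parsed = parse_backend("xgrammar:no-fallback,disable-any-whitespace")
assert parsed["backend"] == "xgrammar"
assert parsed["disable_fallback"] and parsed["disable_any_whitespace"]
# Preferred, non-deprecated equivalent:
#   DecodingConfig(backend="xgrammar", disable_fallback=True,
#                  disable_any_whitespace=True)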
+ DetailedTraceModules = Literal["model", "worker", "all"]
3547
+
3548
+
3549
+ @config
3550
+ @dataclass
3551
+ class ObservabilityConfig:
3552
+ """Configuration for observability - metrics and tracing."""
3553
+
3554
+ show_hidden_metrics_for_version: Optional[str] = None
3555
+ """Enable deprecated Prometheus metrics that have been hidden since the
3556
+ specified version. For example, if a previously deprecated metric has been
3557
+ hidden since the v0.7.0 release, you can use
3558
+ `--show-hidden-metrics-for-version=0.7` as a temporary escape hatch while
3559
+ you migrate to new metrics. The metric is likely to be removed completely
3560
+ in an upcoming release."""
3561
+
3562
+ @cached_property
3563
+ def show_hidden_metrics(self) -> bool:
3564
+ """Check if the hidden metrics should be shown."""
3565
+ if self.show_hidden_metrics_for_version is None:
3566
+ return False
3567
+ return version._prev_minor_version_was(
3568
+ self.show_hidden_metrics_for_version)
3569
+
3570
+ otlp_traces_endpoint: Optional[str] = None
3571
+ """Target URL to which OpenTelemetry traces will be sent."""
3572
+
3573
+ collect_detailed_traces: Optional[list[DetailedTraceModules]] = None
3574
+ """It makes sense to set this only if `--otlp-traces-endpoint` is set. If
3575
+ set, it will collect detailed traces for the specified modules. This
3576
+ involves the use of possibly costly and/or blocking operations and hence might
3577
+ have a performance impact.
3578
+
3579
+ Note that collecting detailed timing information for each request can be
3580
+ expensive."""
3581
+
3582
+ @cached_property
3583
+ def collect_model_forward_time(self) -> bool:
3584
+ """Whether to collect model forward time for the request."""
3585
+ return (self.collect_detailed_traces is not None
3586
+ and ("model" in self.collect_detailed_traces
3587
+ or "all" in self.collect_detailed_traces))
3588
+
3589
+ @cached_property
3590
+ def collect_model_execute_time(self) -> bool:
3591
+ """Whether to collect model execute time for the request."""
3592
+ return (self.collect_detailed_traces is not None
3593
+ and ("worker" in self.collect_detailed_traces
3594
+ or "all" in self.collect_detailed_traces))
3595
+
3596
+ def compute_hash(self) -> str:
3597
+ """
3598
+ WARNING: Whenever a new field is added to this config,
3599
+ ensure that it is included in the factors list if
3600
+ it affects the computation graph.
3601
+
3602
+ Provide a hash that uniquely identifies all the configs
3603
+ that affect the structure of the computation
3604
+ graph from input ids/embeddings to the final hidden states,
3605
+ excluding anything before input ids/embeddings and after
3606
+ the final hidden states.
3607
+ """
3608
+ # no factors to consider.
3609
+ # this config will not affect the computation graph.
3610
+ factors: list[Any] = []
3611
+ hash_str = hashlib.md5(str(factors).encode(),
3612
+ usedforsecurity=False).hexdigest()
3613
+ return hash_str
3614
+
3615
+ def __post_init__(self):
3616
+ if (self.collect_detailed_traces is not None
3617
+ and len(self.collect_detailed_traces) == 1
3618
+ and "," in self.collect_detailed_traces[0]):
3619
+ self._parse_collect_detailed_traces()
3620
+
3621
+ if not is_otel_available() and self.otlp_traces_endpoint is not None:
3622
+ raise ValueError(
3623
+ "OpenTelemetry is not available. Unable to configure "
3624
+ "'otlp_traces_endpoint'. Ensure OpenTelemetry packages are "
3625
+ f"installed. Original error:\n{otel_import_error_traceback}")
3626
+
3627
+ def _parse_collect_detailed_traces(self):
3628
+ assert isinstance(self.collect_detailed_traces, list)
3629
+ self.collect_detailed_traces = cast(
3630
+ list[DetailedTraceModules],
3631
+ self.collect_detailed_traces[0].split(","))
3632
+
3633
+
3634
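# Editor's illustrative sketch (not part of the package diff): how
# collect_detailed_traces maps onto the two cached properties above.
def trace_flags(collect_detailed_traces):
    modules = collect_detailed_traces or []
    return {
        "collect_model_forward_time": "model" in modules or "all" in modules,
        "collect_model_execute_time": "worker" in modules or "all" in modules,
    }

assert trace_flags(["model"]) == {"collect_model_forward_time": True,
                                  "collect_model_execute_time": False}
assert trace_flags(["all"]) == {"collect_model_forward_time": True,
                                "collect_model_execute_time": True}
assert trace_flags(None) == {"collect_model_forward_time": False,
                             "collect_model_execute_time": False}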
+ KVProducer = Literal["kv_producer", "kv_both"]
3635
+ KVConsumer = Literal["kv_consumer", "kv_both"]
3636
+ KVRole = Literal[KVProducer, KVConsumer]
3637
+
3638
+
3639
+ @config
3640
+ @dataclass
3641
+ class KVTransferConfig:
3642
+ """Configuration for distributed KV cache transfer."""
3643
+
3644
+ kv_connector: Optional[str] = None
3645
+ """The KV connector for vLLM to transmit KV caches between vLLM instances.
3646
+ """
3647
+
3648
+ engine_id: Optional[str] = None
3649
+ """The engine id for KV transfers."""
3650
+
3651
+ kv_buffer_device: Optional[str] = "cuda"
3652
+ """The device used by kv connector to buffer the KV cache.
3653
+ Currently only 'cuda' is supported."""
3654
+
3655
+ kv_buffer_size: float = 1e9
3656
+ """The buffer size for TorchDistributedConnector. Measured in number of
3657
+ bytes. Recommended value: 1e9 (about 1GB)."""
3658
+
3659
+ kv_role: Optional[KVRole] = None
3660
+ """Whether this vLLM instance produces, consumes KV cache, or both. Choices
3661
+ are 'kv_producer', 'kv_consumer', and 'kv_both'."""
3662
+
3663
+ kv_rank: Optional[int] = None
3664
+ """The rank of this vLLM instance in the KV cache transfer. Typical value:
3665
+ 0 for prefill instance, 1 for decode instance.
3666
+ Currently only 1P1D is supported."""
3667
+
3668
+ kv_parallel_size: int = 1
3669
+ """The number of parallel instances for KV cache transfer. For
3670
+ PyNcclConnector, this should be 2."""
3671
+
3672
+ kv_ip: str = "127.0.0.1"
3673
+ """The KV connector ip, used to build distributed connection."""
3674
+
3675
+ kv_port: int = 14579
3676
+ """The KV connector port, used to build distributed connection."""
3677
+
3678
+ kv_connector_extra_config: dict[str, Any] = field(default_factory=dict)
3679
+ """any extra config that the connector may need."""
3680
+
3681
+ kv_connector_module_path: Optional[str] = None
3682
+ """The Python module path to dynamically load the KV connector from.
3683
+ Only supported in V1."""
3684
+
3685
+ def compute_hash(self) -> str:
3686
+ """
3687
+ WARNING: Whenever a new field is added to this config,
3688
+ ensure that it is included in the factors list if
3689
+ it affects the computation graph.
3690
+
3691
+ Provide a hash that uniquely identifies all the configs
3692
+ that affect the structure of the computation
3693
+ graph from input ids/embeddings to the final hidden states,
3694
+ excluding anything before input ids/embeddings and after
3695
+ the final hidden states.
3696
+ """
3697
+ # no factors to consider.
3698
+ # this config will not affect the computation graph.
3699
+ factors: list[Any] = []
3700
+ hash_str = hashlib.md5(str(factors).encode(),
3701
+ usedforsecurity=False).hexdigest()
3702
+ return hash_str
3703
+
3704
+ def __post_init__(self) -> None:
3705
+ if self.engine_id is None:
3706
+ self.engine_id = str(uuid.uuid4())
3707
+
3708
+ if self.kv_role is not None and self.kv_role not in get_args(KVRole):
3709
+ raise ValueError(f"Unsupported kv_role: {self.kv_role}. "
3710
+ f"Supported roles are {get_args(KVRole)}")
3711
+
3712
+ if self.kv_connector is not None and self.kv_role is None:
3713
+ raise ValueError("Please specify kv_disagg_role when kv_connector "
3714
+ f"is set, supported roles are {get_args(KVRole)}")
3715
+
3716
+ @property
3717
+ def is_kv_transfer_instance(self) -> bool:
3718
+ return self.kv_connector is not None and \
3719
+ self.kv_role in get_args(KVRole)
3720
+
3721
+ @property
3722
+ def is_kv_producer(self) -> bool:
3723
+ return self.kv_connector is not None and \
3724
+ self.kv_role in get_args(KVProducer)
3725
+
3726
+ @property
3727
+ def is_kv_consumer(self) -> bool:
3728
+ return self.kv_connector is not None and \
3729
+ self.kv_role in get_args(KVConsumer)
3730
+
3731
+ def get_from_extra_config(self, key, default) -> Any:
3732
+ return self.kv_connector_extra_config.get(key, default)
3733
+
3734
+
3735
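# Editor's illustrative sketch (not part of the package diff): a 1P1D setup
# (one prefill producer, one decode consumer) expressed with the fields above.
# "PyNcclConnector" is taken from the kv_parallel_size docstring; the other
# values are examples, not recommendations.
prefill_kv_cfg = {
    "kv_connector": "PyNcclConnector",
    "kv_role": "kv_producer",  # the prefill instance produces KV cache
    "kv_rank": 0,
    "kv_parallel_size": 2,
    "kv_ip": "127.0.0.1",
    "kv_port": 14579,
}
decode_kv_cfg = dict(prefill_kv_cfg, kv_role="kv_consumer", kv_rank=1)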
+ @config
3736
+ @dataclass
3737
+ class KVEventsConfig:
3738
+ """Configuration for KV event publishing."""
3739
+
3740
+ enable_kv_cache_events: bool = False
3741
+ """If True, enable KV cache events for tracking block storage and removal.
3742
+ Events can be published externally by zmq using the event publisher config.
3743
+ """
3744
+
3745
+ publisher: str = "null"
3746
+ """The publisher to use for publishing kv events. Can be "null", "zmq".
3747
+ """
3748
+
3749
+ endpoint: str = "tcp://*:5557"
3750
+ """The zmq endpoint to use for publishing kv events.
3751
+ """
3752
+
3753
+ replay_endpoint: Optional[str] = None
3754
+ """The zmq endpoint to use for replaying kv events.
3755
+ """
3756
+
3757
+ buffer_steps: int = 10_000
3758
+ """The number of steps to cache for replay endpoint. Will only save
3759
+ events from the last N steps for the replay endpoint.
3760
+ """
3761
+
3762
+ hwm: int = 100_000
3763
+ """The zmq high water mark for the event publisher. After queueing N events,
3764
+ events will start dropping if the consumer is not keeping up.
3765
+ """
3766
+
3767
+ max_queue_size: int = 100_000
3768
+ """The maximum number of events to queue while waiting for publishing.
3769
+ """
3770
+
3771
+ topic: str = ""
3772
+ """The topic to use for the event publisher. Consumers can subscribe to
3773
+ this topic to receive events.
3774
+ """
3775
+
3776
+
3777
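# Editor's illustrative sketch (not part of the package diff): enabling
# zmq-based KV cache event publishing with the fields above. Only the defaults
# shown in the docstrings come from the source; the rest are example values.
kv_events_cfg = {
    "enable_kv_cache_events": True,
    "publisher": "zmq",
    "endpoint": "tcp://*:5557",         # bind address for the publisher
    "replay_endpoint": "tcp://*:5558",  # optional replay socket (example port)
    "buffer_steps": 10_000,
    "topic": "kv-events",               # example topic name
}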
+ class CompilationLevel:
3778
+ # constants for the levels of the compilation process
3779
+ NO_COMPILATION = 0
3780
+ DYNAMO_AS_IS = 1
3781
+ DYNAMO_ONCE = 2
3782
+ PIECEWISE = 3
3783
+
3784
+
3785
+ @config
3786
+ @dataclass
3787
+ class PassConfig:
3788
+ """Configuration for custom Inductor passes.
3789
+
3790
+ This is separate from general `CompilationConfig` so that inductor passes
3791
+ don't all have access to full configuration - that would create a cycle as
3792
+ the `PassManager` is set as a property of config."""
3793
+
3794
+ dump_graph_stages: list[str] = field(default_factory=list)
3795
+ """List of stages for which we want to dump the graph. Each pass defines
3796
+ its own stages (before, after, maybe in-between)."""
3797
+ dump_graph_dir: Path = Path(".")
3798
+ """Directory to dump the graphs."""
3799
+ # TODO(luka) better pass enabling system.
3800
+ enable_fusion: bool = True
3801
+ """Whether to enable the custom fusion pass."""
3802
+ enable_noop: bool = True
3803
+ """Whether to enable the custom no-op elimination pass."""
3804
+ enable_sequence_parallelism: bool = False
3805
+ """Whether to enable sequence parallelism."""
3806
+ enable_async_tp: bool = False
3807
+ """Whether to enable async TP."""
3808
+
3809
+ def uuid(self):
3810
+ """
3811
+ Produces a hash unique to the pass configuration.
3812
+ Any new fields that affect compilation should be added to the hash.
3813
+ Do not include dump_graph_* in the hash - they don't affect
3814
+ compilation.
3815
+ """
3816
+ include = {
3817
+ "enable_fusion", "enable_noop", "enable_sequence_parallelism",
3818
+ "enable_async_tp"
3819
+ }
3820
+ dict_ = {k: v for k, v in asdict(self).items() if k in include}
3821
+ return InductorPass.hash_dict(dict_)
3822
+
3823
+ def __post_init__(self) -> None:
3824
+ if not self.enable_noop and self.enable_fusion:
3825
+ logger.warning_once(
3826
+ "Fusion enabled but reshape elimination disabled. "
3827
+ "RMSNorm + quant (fp8) fusion might not work")
3828
+
3829
+
3830
+ @config
3831
+ @dataclass
3832
+ class CompilationConfig:
3833
+ """Configuration for compilation. It has three parts:
3834
+
3835
+ - Top-level Compilation control:
3836
+ - [`level`][vllm.config.CompilationConfig.level]
3837
+ - [`debug_dump_path`][vllm.config.CompilationConfig.debug_dump_path]
3838
+ - [`cache_dir`][vllm.config.CompilationConfig.cache_dir]
3839
+ - [`backend`][vllm.config.CompilationConfig.backend]
3840
+ - [`custom_ops`][vllm.config.CompilationConfig.custom_ops]
3841
+ - [`splitting_ops`][vllm.config.CompilationConfig.splitting_ops]
3842
+ - CudaGraph capture:
3843
+ - [`use_cudagraph`][vllm.config.CompilationConfig.use_cudagraph]
3844
+ - [`cudagraph_capture_sizes`]
3845
+ [vllm.config.CompilationConfig.cudagraph_capture_sizes]
3846
+ - [`cudagraph_num_of_warmups`]
3847
+ [vllm.config.CompilationConfig.cudagraph_num_of_warmups]
3848
+ - [`cudagraph_copy_inputs`]
3849
+ [vllm.config.CompilationConfig.cudagraph_copy_inputs]
3850
+ - [`full_cuda_graph`][vllm.config.CompilationConfig.full_cuda_graph]
3851
+ - Inductor compilation:
3852
+ - [`use_inductor`][vllm.config.CompilationConfig.use_inductor]
3853
+ - [`compile_sizes`][vllm.config.CompilationConfig.compile_sizes]
3854
+ - [`inductor_compile_config`]
3855
+ [vllm.config.CompilationConfig.inductor_compile_config]
3856
+ - [`inductor_passes`][vllm.config.CompilationConfig.inductor_passes]
3857
+ - custom inductor passes
3858
+
3859
+ Why we have different sizes for cudagraph and inductor:
3860
+ - cudagraph: a cudagraph captured for a specific size can only be used
3861
+ for the same size. We need to capture all the sizes we want to use.
3862
+ - inductor: a graph compiled by inductor for a general shape can be used
3863
+ for different sizes. Inductor can also compile for specific sizes,
3864
+ where it can have more information to optimize the graph with fully
3865
+ static shapes. However, we find the general shape compilation is
3866
+ sufficient for most cases. It might be beneficial to compile for
3867
+ certain small batchsizes, where inductor is good at optimizing.
3868
+ """
3869
+ # Top-level Compilation control
3870
+ level: int = 0
3871
+ """The level of compilation:
3872
+
3873
+ - 0: no compilation.
3874
+ - 1: dynamo as is.
3875
+ - 2: dynamo once.
3876
+ - 3: piecewise compilation."""
3877
+ debug_dump_path: str = ""
3878
+ """The path to dump the debug information."""
3879
+ cache_dir: str = ""
3880
+ """The directory to store the compiled graph, to accelerate Inductor
3881
+ compilation. By default, it will use model-related information to generate
3882
+ a cache directory."""
3883
+ backend: str = ""
3884
+ """The backend for compilation. It needs to be a string:
3885
+
3886
+ - "" (empty string): use the default backend.
3887
+ - "eager"/"openxla"/...: use the specified backend registered in PyTorch.
3888
+ - "full.module.name": a qualified name which can be used to import the
3889
+
3890
+ backend function.
3891
+ We use string to avoid serialization issues when using compilation in a
3892
+ distributed setting. When the compilation level is 1 or 2, the backend is
3893
+ used for the compilation directly (it sees the whole graph). When the
3894
+ compilation level is 3, the backend is used for the piecewise compilation
3895
+ (it sees a part of the graph)."""
3896
+ custom_ops: list[str] = field(default_factory=list)
3897
+ """Fine-grained control over which custom ops to enable/disable. Use 'all'
3898
+ to enable all, 'none' to disable all. Also specify a list of custom op
3899
+ names to enable (prefixed with a '+'), or disable (prefixed with a '-').
3900
+ Examples:
3901
+
3902
+ - 'all,-op1' to enable all except op1
3903
+ - 'none,+op1,+op2' to enable only op1 and op2
3904
+
3905
+ By default, all custom ops are enabled when running without Inductor and
3906
+ disabled when running with Inductor (compile_level >= Inductor)."""
3907
+ splitting_ops: list[str] = field(default_factory=list)
3908
+ """A list of ops to split the full graph into subgraphs, used in piecewise
3909
+ compilation."""
3910
+
3911
+ # Inductor capture
3912
+ use_inductor: bool = True
3913
+ """Whether to use inductor compilation:
3914
+
3915
+ - False: inductor compilation is not used. graph runs in eager.
3916
+ - True: inductor compilation is used. one graph for symbolic shape
3917
+ is compiled. In addition, compile for compile_sizes,
3918
+ using configurations in inductor_compile_config."""
3919
+ compile_sizes: Optional[list[Union[int, str]]] = None
3920
+ """Sizes to compile for inductor. In addition
3921
+ to integers, it also supports "cudagraph_capture_sizes" to
3922
+ specify the sizes for cudagraph capture."""
3923
+ inductor_compile_config: dict = field(default_factory=dict)
3924
+ """Additional configurations for inductor.
3925
+ - None: use default configurations."""
3926
+ inductor_passes: dict[str, str] = field(default_factory=dict)
3927
+ """Additional passes for inductor. It is a dictionary
3928
+ from pass name to pass function qualified name. We use function
3929
+ name because the config uses JSON format. If we pass the config
3930
+ from Python, functions can also be passed directly via Python object
3931
+ constructor, e.g. `CompilationConfig(inductor_passes={"a": func})`."""
3932
+
3933
+ # CudaGraph compilation
3934
+ use_cudagraph: bool = envs.VLLM_USE_V1
3935
+ """Whether to use cudagraph inside compilation.
3936
+ - False: cudagraph inside compilation is not used.
3937
+ - True: cudagraph inside compilation is used. It requires
3938
+ that all input buffers have fixed addresses, and all
3939
+ splitting ops write their outputs to input buffers.
3940
+ In the vLLM V1 Engine, this flag only applies for
3941
+ CompilationLevel.PIECEWISE (aka -O3).
3942
+ Note that this is orthogonal to the cudagraph capture logic
3943
+ outside of compilation.
3944
+ TODO: move outside cudagraph logic into compilation.
3945
+ torch.compile will handle cudagraph capture logic in the future."""
3946
+ cudagraph_num_of_warmups: int = 0
3947
+ """Number of warmup runs for cudagraph.
3948
+ It means the first several runs will be treated as warmup runs.
3949
+ Only after that, the execution will be recorded, and the recorded
3950
+ cudagraph will be used for subsequent runs."""
3951
+ cudagraph_capture_sizes: Optional[list[int]] = None
3952
+ """Sizes to capture cudagraph.
3953
+ - None (default): capture sizes are inferred from vllm config.
3954
+ - list[int]: capture sizes are specified as given."""
3955
+ cudagraph_copy_inputs: bool = False
3956
+ """Whether to copy input tensors for
3957
+ cudagraph. If the caller can guarantee that the same input buffers
3958
+ are always used, it can set this to False. Otherwise, it should
3959
+ set this to True, and the compiler will copy the input to an
3960
+ internally managed buffer. Default is False."""
3961
+ full_cuda_graph: bool = False
3962
+ """whether to use a full cuda graph for the entire forward pass rather than
3963
+ splitting certain operations such as attention into subgraphs. Thus this
3964
+ flag cannot be used together with splitting_ops. This may provide
3965
+ performance benefits for smaller models."""
3966
+
3967
+ pass_config: PassConfig = field(default_factory=PassConfig)
3968
+ """Custom inductor passes, see PassConfig for more details"""
3969
+
3970
+ max_capture_size: int = field(default=None, init=False) # type: ignore
3971
+ """not configurable, computed after init"""
3972
+ local_cache_dir: str = field(default=None, init=False) # type: ignore
3973
+ """local cache dir for each rank"""
3974
+ bs_to_padded_graph_size: list[int] = field(
3975
+ default=None, # type: ignore
3976
+ init=False)
3977
+ """optimization:
3978
+ Intuitively, bs_to_padded_graph_size should be dict[int, int].
3979
+ Since we know all keys are in the range [0, max_capture_size],
3980
+ we can optimize it to list[int] for better lookup performance."""
3981
+
3982
+ # keep track of enabled and disabled custom ops
3983
+ enabled_custom_ops: Counter[str] = field(default_factory=Counter,
3984
+ init=False)
3985
+ """custom ops that are enabled"""
3986
+ disabled_custom_ops: Counter[str] = field(default_factory=Counter,
3987
+ init=False)
3988
+ """custom ops that are disabled"""
3989
+ traced_files: set[str] = field(default_factory=set, init=False)
3990
+ """files that are traced for compilation"""
3991
+ compilation_time: float = field(default=0.0, init=False)
3992
+ """time taken for compilation"""
3993
+
3994
+ static_forward_context: dict[str, Any] = field(default_factory=dict,
3995
+ init=False)
3996
+ """Per-model forward context
3997
+ Map from layer name to layer objects that need to be accessed outside
3998
+ model code, e.g., Attention, FusedMOE when dp_size>1."""
3999
+
4000
+ def compute_hash(self) -> str:
4001
+ """
4002
+ WARNING: Whenever a new field is added to this config,
4003
+ ensure that it is included in the factors list if
4004
+ it affects the computation graph.
4005
+
4006
+ Provide a hash that uniquely identifies all the configs
4007
+ that affect the structure of the computation
4008
+ graph from input ids/embeddings to the final hidden states,
4009
+ excluding anything before input ids/embeddings and after
4010
+ the final hidden states.
4011
+ """
4012
+ factors: list[Any] = []
4013
+ factors.append(self.level)
4014
+ factors.append(self.backend)
4015
+ factors.append(self.custom_ops)
4016
+ factors.append(self.splitting_ops)
4017
+ factors.append(self.use_inductor)
4018
+ factors.append(self.inductor_compile_config)
4019
+ factors.append(self.inductor_passes)
4020
+ factors.append(self.pass_config.uuid())
4021
+ return hashlib.sha256(str(factors).encode()).hexdigest()
4022
+
4023
+ def __repr__(self) -> str:
4024
+ exclude = {
4025
+ "static_forward_context": True,
4026
+ "enabled_custom_ops": True,
4027
+ "disabled_custom_ops": True,
4028
+ "compilation_time": True,
4029
+ "bs_to_padded_graph_size": True,
4030
+ "pass_config": True,
4031
+ "traced_files": True,
4032
+ "inductor_compile_config": {
4033
+ "post_grad_custom_post_pass": True,
4034
+ },
4035
+ }
4036
+ # The cast to string is necessary because Pydantic is mocked in docs
4037
+ # builds and sphinx-argparse doesn't know the return type of decode()
4038
+ return str(
4039
+ TypeAdapter(CompilationConfig).dump_json(
4040
+ self,
4041
+ exclude=exclude, # type: ignore[arg-type]
4042
+ exclude_unset=True).decode())
4043
+
4044
+ __str__ = __repr__
4045
+
4046
+ @classmethod
4047
+ def from_cli(cls, cli_value: str) -> "CompilationConfig":
4048
+ """Parse the CLI value for the compilation config."""
4049
+ if cli_value in ["0", "1", "2", "3"]:
4050
+ return cls(level=int(cli_value))
4051
+ return TypeAdapter(CompilationConfig).validate_json(cli_value)
4052
+
4053
+ def __post_init__(self) -> None:
4054
+ count_none = self.custom_ops.count("none")
4055
+ count_all = self.custom_ops.count("all")
4056
+ assert count_none + count_all <= 1, "Can only specify 'none' or 'all'"
4057
+
4058
+ # TODO(zou3519/luka): There are 2 issues with auto-functionalization V2:
4059
+ # 1. A bug in PyTorch, fixed in 2.7:
4060
+ # https://github.com/pytorch/pytorch/issues/147924
4061
+ # 2. Custom passes (fusion) rely on auto-functionalization V1 and don't
4062
+ # work with V2. Addressing this will take extra engineering effort
4063
+ # and it is not yet a priority. RFC here:
4064
+ # https://github.com/vllm-project/vllm/issues/14703
4065
+
4066
+ if is_torch_equal_or_newer("2.6"):
4067
+ KEY = 'enable_auto_functionalized_v2'
4068
+ if KEY not in self.inductor_compile_config:
4069
+ self.inductor_compile_config[KEY] = False
4070
+
4071
+ for k, v in self.inductor_passes.items():
4072
+ if not isinstance(v, str):
4073
+ assert callable(v), (
4074
+ f"pass {k} should be callable or a qualified name")
4075
+ self.inductor_compile_config[k] = v if isinstance(
4076
+ v, InductorPass) else CallableInductorPass(v)
4077
+ continue
4078
+
4079
+ # resolve function from qualified name
4080
+ names = v.split(".")
4081
+ module = ".".join(names[:-1])
4082
+ func_name = names[-1]
4083
+ func = __import__(module).__dict__[func_name]
4084
+ self.inductor_compile_config[k] = func if isinstance(
4085
+ func, InductorPass) else CallableInductorPass(func)
4086
+
4087
+ if isinstance(self.pass_config, dict):
4088
+ self.pass_config = PassConfig(**self.pass_config)
4089
+
4090
+ def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]:
4091
+ if self.level == CompilationLevel.NO_COMPILATION:
4092
+ raise ValueError("No compilation level is set.")
4093
+
4094
+ from torch._dynamo.backends.registry import list_backends
4095
+ torch_backends = list_backends(exclude_tags=tuple())
4096
+ if self.level in [
4097
+ CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE
4098
+ ]:
4099
+ if self.backend == "":
4100
+ return "eager"
4101
+ if self.backend in torch_backends:
4102
+ return self.backend
4103
+ return resolve_obj_by_qualname(self.backend)
4104
+
4105
+ # TODO: pass user-specified backend to piecewise compilation
4106
+ # merge with the config use_inductor
4107
+ assert self.level == CompilationLevel.PIECEWISE
4108
+
4109
+ from vllm.compilation.backends import VllmBackend
4110
+ return VllmBackend(vllm_config)
4111
+
4112
+ def init_with_cudagraph_sizes(self,
4113
+ cudagraph_capture_sizes: list[int]) -> None:
4114
+ """To complete the initialization of config,
4115
+ we need to know the cudagraph sizes."""
4116
+
4117
+ if self.cudagraph_capture_sizes is None:
4118
+ self.cudagraph_capture_sizes = cudagraph_capture_sizes
4119
+ else:
4120
+ # de-duplicate the sizes provided by the config
4121
+ dedup_sizes = list(set(self.cudagraph_capture_sizes))
4122
+ if len(dedup_sizes) < len(self.cudagraph_capture_sizes):
4123
+ logger.info(("cudagraph sizes specified by model runner"
4124
+ " %s is overridden by config %s"),
4125
+ cudagraph_capture_sizes, dedup_sizes)
4126
+ self.cudagraph_capture_sizes = dedup_sizes
4127
+
4128
+ computed_compile_sizes = []
4129
+ if self.compile_sizes is not None:
4130
+ # de-duplicate the sizes provided by the config
4131
+ self.compile_sizes = list(set(self.compile_sizes))
4132
+ for x in self.compile_sizes:
4133
+ if isinstance(x, str):
4134
+ assert x == "cudagraph_capture_sizes", \
4135
+ "Unrecognized size type in compile_sizes, " \
4136
+ f"expect 'cudagraph_capture_sizes', got {x}"
4137
+ computed_compile_sizes.extend(self.cudagraph_capture_sizes)
4138
+ else:
4139
+ assert isinstance(x, int)
4140
+ computed_compile_sizes.append(x)
4141
+ self.compile_sizes = computed_compile_sizes # type: ignore
4142
+
4143
+ # sort to make sure cudagraph capture sizes are in descending order
4144
+ self.cudagraph_capture_sizes.sort(reverse=True)
4145
+ self.max_capture_size = self.cudagraph_capture_sizes[
4146
+ 0] if self.cudagraph_capture_sizes else 0
4147
+
4148
+ # pre-compute the mapping from batch size to padded graph size
4149
+ self.bs_to_padded_graph_size = [
4150
+ 0 for i in range(self.max_capture_size + 1)
4151
+ ]
4152
+ for end, start in zip(self.cudagraph_capture_sizes,
4153
+ self.cudagraph_capture_sizes[1:] + [0]):
4154
+ for bs in range(start, end):
4155
+ if bs == start:
4156
+ self.bs_to_padded_graph_size[bs] = start
4157
+ else:
4158
+ self.bs_to_padded_graph_size[bs] = end
4159
+ self.bs_to_padded_graph_size[
4160
+ self.max_capture_size] = self.max_capture_size
4161
+
4162
+ def set_splitting_ops_for_v1(self):
4163
+ # NOTE: this function needs to be called
4164
+ if self.splitting_ops and self.full_cuda_graph:
4165
+ raise ValueError("full_cuda_graph cannot be used together with "
4166
+ "splitting_ops, as Full CUDA graph will override "
4167
+ f"the splitting_ops: {self.splitting_ops}")
4168
+
4169
+ if not self.splitting_ops:
4170
+ self.splitting_ops = [] if self.full_cuda_graph else [
4171
+ "vllm.unified_attention",
4172
+ "vllm.unified_attention_with_output",
4173
+ ]
4174
+
4175
+
4176
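# Editor's illustrative sketch (not part of the package diff): (1) the CLI
# forms that from_cli accepts, and (2) the batch-size padding table built by
# init_with_cudagraph_sizes, restated standalone. The capture sizes [1, 2, 4, 8]
# are an example, not a default.
#
# (1) CLI forms documented above:
#       -O3                                     -> CompilationConfig(level=3)
#       '{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}'  -> JSON config
#
# (2) Every batch size in [0, max_capture_size] is padded up to the smallest
#     capture size that can hold it.
def build_padding_table(capture_sizes: list[int]) -> list[int]:
    sizes = sorted(set(capture_sizes), reverse=True)
    max_size = sizes[0] if sizes else 0
    table = [0] * (max_size + 1)
    for end, start in zip(sizes, sizes[1:] + [0]):
        for bs in range(start, end):
            table[bs] = start if bs == start else end
    table[max_size] = max_size
    return table

table = build_padding_table([1, 2, 4, 8])
assert table[3] == 4  # a batch of 3 reuses the cudagraph captured for size 4
assert table[5] == 8  # a batch of 5 reuses the cudagraph captured for size 8
assert table[8] == 8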
+ @config
4177
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
4178
+ class VllmConfig:
4179
+ """Dataclass which contains all vllm-related configuration. This
4180
+ simplifies passing around the distinct configurations in the codebase.
4181
+ """
4182
+
4183
+ # TODO: use default_factory once default constructing ModelConfig doesn't
4184
+ # try to download a model
4185
+ model_config: ModelConfig = None # type: ignore
4186
+ """Model configuration."""
4187
+ cache_config: CacheConfig = field(default_factory=CacheConfig)
4188
+ """Cache configuration."""
4189
+ parallel_config: ParallelConfig = field(default_factory=ParallelConfig)
4190
+ """Parallel configuration."""
4191
+ scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig)
4192
+ """Scheduler configuration."""
4193
+ device_config: DeviceConfig = field(default_factory=DeviceConfig)
4194
+ """Device configuration."""
4195
+ load_config: LoadConfig = field(default_factory=LoadConfig)
4196
+ """Load configuration."""
4197
+ lora_config: Optional[LoRAConfig] = None
4198
+ """LoRA configuration."""
4199
+ speculative_config: Optional[SpeculativeConfig] = None
4200
+ """Speculative decoding configuration."""
4201
+ decoding_config: DecodingConfig = field(default_factory=DecodingConfig)
4202
+ """Decoding configuration."""
4203
+ observability_config: Optional[ObservabilityConfig] = None
4204
+ """Observability configuration."""
4205
+ prompt_adapter_config: Optional[PromptAdapterConfig] = None
4206
+ """Prompt adapter configuration."""
4207
+ quant_config: Optional[QuantizationConfig] = None
4208
+ """Quantization configuration."""
4209
+ compilation_config: CompilationConfig = field(
4210
+ default_factory=CompilationConfig)
4211
+ """`torch.compile` configuration for the model.
4212
+
4213
+ When it is a number (0, 1, 2, 3), it will be interpreted as the
4214
+ optimization level.
4215
+
4216
+ NOTE: level 0 is the default level without any optimization. level 1 and 2
4217
+ are for internal testing only. level 3 is the recommended level for
4218
+ production.
4219
+
4220
+ Following the convention of traditional compilers, using `-O` without space
4221
+ is also supported. `-O3` is equivalent to `-O 3`.
4222
+
4223
+ You can specify the full compilation config like so:
4224
+ `{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}`
4225
+ """
4226
+ kv_transfer_config: Optional[KVTransferConfig] = None
4227
+ """The configurations for distributed KV cache transfer."""
4228
+ kv_events_config: Optional[KVEventsConfig] = None
4229
+ """The configurations for event publishing."""
4230
+ # some opaque config, only used to provide additional information
4231
+ # for the hash computation, mainly used for testing, debugging or out of
4232
+ # tree config registration.
4233
+ additional_config: Union[dict, SupportsHash] = field(default_factory=dict)
4234
+ """Additional config for specified platform. Different platforms may
4235
+ support different configs. Make sure the configs are valid for the platform
4236
+ you are using. Contents must be hashable."""
4237
+ instance_id: str = ""
4238
+ """The ID of the vLLM instance."""
4239
+
4240
+ def compute_hash(self) -> str:
4241
+ """
4242
+ WARNING: Whenever a new field is added to this config,
4243
+ ensure that it is included in the factors list if
4244
+ it affects the computation graph.
4245
+
4246
+ Provide a hash that uniquely identifies all the configs
4247
+ that affect the structure of the computation
4248
+ graph from input ids/embeddings to the final hidden states,
4249
+ excluding anything before input ids/embeddings and after
4250
+ the final hidden states.
4251
+ """
4252
+ factors: list[Any] = []
4253
+
4254
+ # summarize vllm config
4255
+ vllm_factors: list[Any] = []
4256
+ from vllm import __version__
4257
+ vllm_factors.append(__version__)
4258
+ vllm_factors.append(envs.VLLM_USE_V1)
4259
+ if self.model_config:
4260
+ vllm_factors.append(self.model_config.compute_hash())
4261
+ else:
4262
+ vllm_factors.append("None")
4263
+ if self.cache_config:
4264
+ vllm_factors.append(self.cache_config.compute_hash())
4265
+ else:
4266
+ vllm_factors.append("None")
4267
+ if self.parallel_config:
4268
+ vllm_factors.append(self.parallel_config.compute_hash())
4269
+ else:
4270
+ vllm_factors.append("None")
4271
+ if self.scheduler_config:
4272
+ vllm_factors.append(self.scheduler_config.compute_hash())
4273
+ else:
4274
+ vllm_factors.append("None")
4275
+ if self.device_config:
4276
+ vllm_factors.append(self.device_config.compute_hash())
4277
+ else:
4278
+ vllm_factors.append("None")
4279
+ if self.load_config:
4280
+ vllm_factors.append(self.load_config.compute_hash())
4281
+ else:
4282
+ vllm_factors.append("None")
4283
+ if self.lora_config:
4284
+ vllm_factors.append(self.lora_config.compute_hash())
4285
+ # LoRA creates static buffers based on max_num_batched_tokens.
4286
+ # The tensor sizes and strides get captured in the torch.compile
4287
+ # graph explicitly.
4288
+ vllm_factors.append(
4289
+ str(self.scheduler_config.max_num_batched_tokens))
4290
+ else:
4291
+ vllm_factors.append("None")
4292
+ if self.speculative_config:
4293
+ vllm_factors.append(self.speculative_config.compute_hash())
4294
+ else:
4295
+ vllm_factors.append("None")
4296
+ if self.decoding_config:
4297
+ vllm_factors.append(self.decoding_config.compute_hash())
4298
+ else:
4299
+ vllm_factors.append("None")
4300
+ if self.observability_config:
4301
+ vllm_factors.append(self.observability_config.compute_hash())
4302
+ else:
4303
+ vllm_factors.append("None")
4304
+ if self.prompt_adapter_config:
4305
+ vllm_factors.append(self.prompt_adapter_config.compute_hash())
4306
+ else:
4307
+ vllm_factors.append("None")
4308
+ if self.quant_config:
4309
+ pass # should be captured by model_config.quantization
4310
+ if self.compilation_config:
4311
+ vllm_factors.append(self.compilation_config.compute_hash())
4312
+ else:
4313
+ vllm_factors.append("None")
4314
+ if self.kv_transfer_config:
4315
+ vllm_factors.append(self.kv_transfer_config.compute_hash())
4316
+ else:
4317
+ vllm_factors.append("None")
4318
+ if self.additional_config:
4319
+ if isinstance(additional_config := self.additional_config, dict):
4320
+ additional_config_hash = hashlib.md5(
4321
+ json.dumps(additional_config, sort_keys=True).encode(),
4322
+ usedforsecurity=False,
4323
+ ).hexdigest()
4324
+ else:
4325
+ additional_config_hash = additional_config.compute_hash()
4326
+ vllm_factors.append(additional_config_hash)
4327
+ else:
4328
+ vllm_factors.append("None")
4329
+ factors.append(vllm_factors)
4330
+
4331
+ hash_str = hashlib.md5(str(factors).encode(),
4332
+ usedforsecurity=False).hexdigest()[:10]
4333
+ return hash_str
4334
+
4335
+ def pad_for_cudagraph(self, batch_size: int) -> int:
4336
+ # if batch_size > self.compilation_config.max_capture_size,
4337
+ # it should raise an IndexError.
4338
+ # the caller should make sure the batch_size is within the range,
4339
+ # i.e., batch_size <= self.compilation_config.max_capture_size
4340
+ return self.compilation_config.bs_to_padded_graph_size[batch_size]
4341
+
4342
+ @staticmethod
4343
+ def _get_quantization_config(
4344
+ model_config: ModelConfig,
4345
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
4346
+ """Get the quantization config."""
4347
+ from vllm.platforms import current_platform
4348
+ if model_config.quantization is not None:
4349
+ from vllm.model_executor.model_loader.weight_utils import (
4350
+ get_quant_config)
4351
+ quant_config = get_quant_config(model_config, load_config)
4352
+ capability_tuple = current_platform.get_device_capability()
4353
+
4354
+ if capability_tuple is not None:
4355
+ capability = capability_tuple.to_int()
4356
+ if capability < quant_config.get_min_capability():
4357
+ raise ValueError(
4358
+ f"The quantization method {model_config.quantization} "
4359
+ "is not supported for the current GPU. Minimum "
4360
+ f"capability: {quant_config.get_min_capability()}. "
4361
+ f"Current capability: {capability}.")
4362
+ supported_dtypes = quant_config.get_supported_act_dtypes()
4363
+ if model_config.dtype not in supported_dtypes:
4364
+ raise ValueError(
4365
+ f"{model_config.dtype} is not supported for quantization "
4366
+ f"method {model_config.quantization}. Supported dtypes: "
4367
+ f"{supported_dtypes}")
4368
+ return quant_config
4369
+ return None
4370
+
4371
+ @staticmethod
4372
+ def get_quantization_config(
4373
+ model_config: ModelConfig,
4374
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
4375
+ import copy
4376
+
4377
+ # For some reason, the _ version of this modifies the model_config
4378
+ # object, so using deepcopy to avoid this problem.
4379
+ return VllmConfig._get_quantization_config(copy.deepcopy(model_config),
4380
+ load_config)
4381
+
4382
+ def with_hf_config(
4383
+ self,
4384
+ hf_config: PretrainedConfig,
4385
+ architectures: Optional[list[str]] = None,
4386
+ ) -> "VllmConfig":
4387
+ if architectures is not None:
4388
+ hf_config = copy.deepcopy(hf_config)
4389
+ hf_config.architectures = architectures
4390
+
4391
+ model_config = copy.deepcopy(self.model_config)
4392
+ model_config.hf_config = hf_config
4393
+
4394
+ return replace(self, model_config=model_config)
4395
+
4396
+    def __post_init__(self):
+        """Verify configs are valid & consistent with each other.
+        """
+        if self.model_config is not None:
+            self.model_config.verify_async_output_proc(self.parallel_config,
+                                                       self.speculative_config,
+                                                       self.device_config)
+            self.model_config.verify_with_parallel_config(self.parallel_config)
+            self.model_config.verify_dual_chunk_attention_config(
+                self.load_config)
+
+        self.cache_config.verify_with_parallel_config(self.parallel_config)
+
+        if self.lora_config is not None:
+            self.lora_config.verify_with_cache_config(self.cache_config)
+            self.lora_config.verify_with_model_config(self.model_config)
+            self.lora_config.verify_lora_support()
+        if self.prompt_adapter_config is not None:
+            self.prompt_adapter_config.verify_with_model_config(
+                self.model_config)
+
+        if self.quant_config is None and self.model_config is not None:
+            self.quant_config = VllmConfig._get_quantization_config(
+                self.model_config, self.load_config)
+
+        from vllm.platforms import current_platform
+        if self.model_config is not None and \
+                self.scheduler_config.chunked_prefill_enabled and \
+                self.model_config.dtype == torch.float32 and \
+                current_platform.get_device_capability() == (7, 5):
+            logger.warning_once(
+                "Turing devices tensor cores do not support float32 matmul. "
+                "To workaround this limitation, vLLM will set 'ieee' input "
+                "precision for chunked prefill triton kernels.")
+
+        # async tp is built on top of sequence parallelism
+        # and requires it to be enabled.
+        if self.compilation_config.pass_config.enable_async_tp:
+            self.compilation_config.pass_config.enable_sequence_parallelism = \
+                True
+        if self.compilation_config.pass_config.enable_sequence_parallelism:
+            self.compilation_config.custom_ops.append("+rms_norm")
+        if envs.VLLM_USE_V1 and self.model_config is not None and \
+                not self.model_config.enforce_eager:
+            # FIXME(rob): Add function to set all of these.
+            if not self.compilation_config.custom_ops:
+                self.compilation_config.custom_ops = ["none"]
+            self.compilation_config.cudagraph_num_of_warmups = 1
+            self.compilation_config.pass_config.enable_fusion = False
+            self.compilation_config.pass_config.enable_noop = False
+            self.compilation_config.level = CompilationLevel.PIECEWISE
+            self.compilation_config.set_splitting_ops_for_v1()
+
+        self._set_cudagraph_sizes()
+
+        if self.cache_config.cpu_offload_gb > 0 and \
+                self.compilation_config.level != CompilationLevel.NO_COMPILATION \
+                and not envs.VLLM_USE_V1:
+            logger.warning(
+                "CPU offload is not supported with `torch.compile` in v0 yet."
+                " Disabling `torch.compile`.")
+            self.compilation_config.level = CompilationLevel.NO_COMPILATION
+
+        if ((not envs.VLLM_USE_V1) and self.lora_config is not None
+                and self.compilation_config.level
+                != CompilationLevel.NO_COMPILATION):
+            logger.warning(
+                "LoRA for V0 is not supported with `torch.compile` yet. "
+                "Disabling `torch.compile`.")
+            self.compilation_config.level = CompilationLevel.NO_COMPILATION
+
+        if self.compilation_config.full_cuda_graph and \
+                not self.model_config.disable_cascade_attn:
+            logger.warning_once(
+                "full_cuda_graph is not supported with "
+                "cascade attention. Disabling cascade attention.")
+            self.model_config.disable_cascade_attn = True
+            self.cache_config.enable_prefix_caching = False
+
+        if (self.kv_events_config is not None
+                and self.kv_events_config.enable_kv_cache_events
+                and not self.cache_config.enable_prefix_caching):
+            logger.warning(
+                "KV cache events are on, but prefix caching is not enabled. "
+                "Use --enable-prefix-caching to enable.")
+        if (self.kv_events_config is not None
+                and self.kv_events_config.publisher != "null"
+                and not self.kv_events_config.enable_kv_cache_events):
+            logger.warning("KV cache events are disabled, "
+                           "but the scheduler is configured to publish them. "
+                           "Modify KVEventsConfig.enable_kv_cache_events "
+                           "to True to enable.")
+        current_platform.check_and_update_config(self)
+
+        if not self.instance_id:
+            self.instance_id = random_uuid()[:5]
+
+        if (envs.VLLM_USE_V1
+                and not self.scheduler_config.disable_hybrid_kv_cache_manager):
+            # The logger should only print a warning message for hybrid
+            # models. Since we can't know yet whether the model is hybrid,
+            # we don't log the warning here and will log it later.
+            if not (current_platform.is_cuda() or current_platform.is_rocm()):
+                # Hybrid KV cache manager is not supported on non-GPU platforms.
+                self.scheduler_config.disable_hybrid_kv_cache_manager = True
+            if self.kv_transfer_config is not None:
+                # Hybrid KV cache manager is not compatible with KV transfer.
+                self.scheduler_config.disable_hybrid_kv_cache_manager = True
+            if self.kv_events_config is not None:
+                # Hybrid KV cache manager is not compatible with KV events.
+                self.scheduler_config.disable_hybrid_kv_cache_manager = True
+
+    def update_sizes_for_sequence_parallelism(self,
+                                              possible_sizes: list) -> list:
+        # Remove the sizes that are not multiples of tp_size when
+        # sequence parallelism is enabled.
+        removed_sizes = [
+            size for size in possible_sizes
+            if size % self.parallel_config.tensor_parallel_size != 0
+        ]
+        if removed_sizes:
+            logger.warning(
+                "Batch sizes %s are removed because they are not "
+                "multiples of tp_size %d when "
+                "sequence parallelism is enabled", removed_sizes,
+                self.parallel_config.tensor_parallel_size)
+
+        return [
+            size for size in possible_sizes
+            if size % self.parallel_config.tensor_parallel_size == 0
+        ]
+
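For illustration only (an editor's sketch, not part of the package source): the filtering that update_sizes_for_sequence_parallelism performs, assuming a hypothetical tensor_parallel_size of 4 and a short candidate list.

# Candidate cudagraph batch sizes before filtering (hypothetical values).
possible_sizes = [1, 2, 4, 8, 16, 24, 32]
tp_size = 4  # assumed tensor_parallel_size for this example

# Sizes that are not multiples of tp_size are dropped (and would be logged).
removed_sizes = [s for s in possible_sizes if s % tp_size != 0]  # [1, 2]
kept_sizes = [s for s in possible_sizes if s % tp_size == 0]     # [4, 8, 16, 24, 32]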
+    def _set_cudagraph_sizes(self):
+        """
+        cudagraph batchsize padding logic:
+
+        `[1, 2, 4] + [8 * i for i in range(1, 1025)]` is a list of all possible
+        batch sizes that cudagraph will capture.
+
+        Depending on the engine's configuration of `max_num_seqs`, the
+        candidate batch sizes to capture cudagraph will shrink to the subset
+        that just covers the range `[1, max_num_seqs]`. In the common case,
+        `max_num_seqs` is 256, and the cudagraph batch sizes will be
+        `[1, 2, 4, 8, 16, 24, 32, 40, ..., 256]`.
+
+        However, if users specify the cudagraph capture sizes through
+        compilation config, we will use the specified sizes instead.
+
+        In the end, `vllm_config.compilation_config.cudagraph_capture_sizes`
+        will be the final sizes to capture cudagraph (in descending order).
+
+        During runtime, if the batch size is larger than the largest size in
+        `vllm_config.compilation_config.cudagraph_capture_sizes`,
+        no cudagraph will be used.
+        If the batch size is no larger than that largest size,
+        we can quickly find the padded graph size for a given batch size by
+        looking up `vllm_config.compilation_config.bs_to_padded_graph_size`.
+        """
+
+        # calculate the default `batch_size_capture_list`
+        if not envs.VLLM_USE_V1:
+            batch_size_capture_list = []
+            max_batchsize_to_capture = 0
+            if self.scheduler_config is not None and \
+                    self.model_config is not None and \
+                    not self.model_config.enforce_eager:
+
+                possible_sizes = [1, 2, 4] + [8 * i for i in range(1, 1025)]
+                if self.parallel_config.tensor_parallel_size > 1 and \
+                        self.compilation_config.pass_config.enable_sequence_parallelism:
+                    possible_sizes = self.update_sizes_for_sequence_parallelism(
+                        possible_sizes)
+
+                # find the minimum size that is larger than max_num_seqs,
+                # which then becomes the max_batchsize_to_capture
+                larger_sizes = [
+                    x for x in possible_sizes
+                    if x >= self.scheduler_config.max_num_seqs
+                ]
+                if larger_sizes:
+                    max_batchsize_to_capture = larger_sizes[0]
+                else:
+                    max_batchsize_to_capture = possible_sizes[-1]
+
+                # filter out the sizes that are
+                # larger than max_batchsize_to_capture
+                batch_size_capture_list = [
+                    size for size in possible_sizes
+                    if size <= max_batchsize_to_capture
+                ]
+        else:
+            batch_size_capture_list = []
+            if self.model_config is not None and \
+                    not self.model_config.enforce_eager:
+                cuda_graph_sizes = self.scheduler_config.cuda_graph_sizes
+                if len(cuda_graph_sizes) == 1:
+                    batch_size_capture_list = [1, 2, 4] + [
+                        i for i in range(8, cuda_graph_sizes[0] + 1, 8)
+                    ]
+                elif len(cuda_graph_sizes) > 1:
+                    batch_size_capture_list = sorted(cuda_graph_sizes)
+                else:
+                    raise TypeError(f"Invalid value for {cuda_graph_sizes=}.")
+                if self.parallel_config.tensor_parallel_size > 1 and \
+                        self.compilation_config.pass_config.enable_sequence_parallelism:
+                    batch_size_capture_list = \
+                        self.update_sizes_for_sequence_parallelism(batch_size_capture_list)
+                max_num_tokens = self.scheduler_config.max_num_batched_tokens
+                batch_size_capture_list = [
+                    size for size in batch_size_capture_list
+                    if size <= max_num_tokens
+                ]
+
+        self.compilation_config.init_with_cudagraph_sizes(
+            batch_size_capture_list)
+
+    def recalculate_max_model_len(self, max_model_len: int):
+        model_config = self.model_config
+        max_model_len = model_config.get_and_verify_max_len(max_model_len)
+        self.model_config.max_model_len = max_model_len
+        self.scheduler_config.max_model_len = max_model_len
+        self.compute_hash()
+
+    def __str__(self):
+        return (
+            f"model={self.model_config.model!r},"
+            f" speculative_config={self.speculative_config!r},"
+            f" tokenizer={self.model_config.tokenizer!r}, "
+            f"skip_tokenizer_init={self.model_config.skip_tokenizer_init},"
+            f" tokenizer_mode={self.model_config.tokenizer_mode}, "
+            f"revision={self.model_config.revision}, "
+            f"override_neuron_config={self.model_config.override_neuron_config},"
+            f" tokenizer_revision={self.model_config.tokenizer_revision}, "
+            f"trust_remote_code={self.model_config.trust_remote_code}, "
+            f"dtype={self.model_config.dtype}, "
+            f"max_seq_len={self.model_config.max_model_len},"
+            f" download_dir={self.load_config.download_dir!r}, "
+            f"load_format={self.load_config.load_format}, "
+            f"tensor_parallel_size={self.parallel_config.tensor_parallel_size},"
+            f" pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa
+            f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa
+            f"quantization={self.model_config.quantization}, "
+            f"enforce_eager={self.model_config.enforce_eager}, "
+            f"kv_cache_dtype={self.cache_config.cache_dtype}, "
+            f" device_config={self.device_config.device}, "
+            f"decoding_config={self.decoding_config!r}, "
+            f"observability_config={self.observability_config!r}, "
+            f"seed={self.model_config.seed}, "
+            f"served_model_name={self.model_config.served_model_name}, "
+            f"num_scheduler_steps={self.scheduler_config.num_scheduler_steps}, "
+            f"multi_step_stream_outputs={self.scheduler_config.multi_step_stream_outputs}, " # noqa
+            f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, "
+            f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, " # noqa
+            f"use_async_output_proc={self.model_config.use_async_output_proc}, "
+            f"pooler_config={self.model_config.pooler_config!r}, "
+            f"compilation_config={self.compilation_config!r}")
+
+
+_current_vllm_config: Optional[VllmConfig] = None
+
+
+@contextmanager
+def set_current_vllm_config(vllm_config: VllmConfig, check_compile=False):
+    """
+    Temporarily set the current vLLM config.
+    Used during model initialization.
+    We save the current vLLM config in a global variable,
+    so that all modules can access it, e.g. custom ops
+    can access the vLLM config to determine how to dispatch.
+    """
+    global _current_vllm_config
+    old_vllm_config = _current_vllm_config
+    from vllm.compilation.counter import compilation_counter
+    num_models_seen = compilation_counter.num_models_seen
+    try:
+        _current_vllm_config = vllm_config
+        yield
+    except Exception:
+        raise
+    else:
+        logger.debug("enabled custom ops: %s",
+                     vllm_config.compilation_config.enabled_custom_ops)
+        logger.debug("disabled custom ops: %s",
+                     vllm_config.compilation_config.disabled_custom_ops)
+        if check_compile and \
+                vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \
+                and compilation_counter.num_models_seen == num_models_seen:
+            # If the model supports compilation,
+            # compilation_counter.num_models_seen should be increased
+            # by at least 1.
+            # If it is not increased, it means the model does not support
+            # compilation (does not have @support_torch_compile decorator).
+            logger.warning(
+                "`torch.compile` is turned on, but the model %s"
+                " does not support it. Please open an issue on GitHub"
+                " if you want it to be supported.",
+                vllm_config.model_config.model)
+    finally:
+        _current_vllm_config = old_vllm_config
+
+
+def get_current_vllm_config() -> VllmConfig:
+    if _current_vllm_config is None:
+        # In CI, usually when we test custom ops/modules directly,
+        # we don't set the vllm config. In that case, we set a default
+        # config.
+        logger.warning("Current vLLM config is not set.")
+        from vllm.config import VllmConfig
+        return VllmConfig()
+    return _current_vllm_config
+
+
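For illustration only (an editor's sketch, not part of the package source): how the two helpers above are meant to be used together; the bare VllmConfig() here is just a stand-in for a fully built engine config.

from vllm.config import (VllmConfig, get_current_vllm_config,
                         set_current_vllm_config)

cfg = VllmConfig()
with set_current_vllm_config(cfg):
    # While the context is active, any module (e.g. a custom op) can look up
    # the active config instead of having it threaded through call sites.
    assert get_current_vllm_config() is cfg
# On exit the previous value is restored; calling get_current_vllm_config()
# with nothing set logs a warning and returns a default VllmConfig.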
+def contains_object_print(text):
+    """
+    Check if the text looks like a printed Python object, e.g.
+    contains any substring matching the pattern: "at 0xFFFFFFF>"
+    We match against 0x followed by 2-16 hex chars (there's
+    a max of 16 on a 64 bit system).
+
+    Args:
+        text (str): The text to check
+
+    Returns:
+        result (bool): `True` if a match is found, `False` otherwise.
+    """
+    pattern = r'at 0x[a-fA-F0-9]{2,16}>'
+    match = re.search(pattern, text)
+    return match is not None
+
+
+def assert_hashable(text):
+    if not contains_object_print(text):
+        return True
+    raise AssertionError(
+        f"vLLM tried to hash some configs that may have Python object ids "
+        f"in them. This is a bug, please file an issue. "
+        f"Text being hashed: {text}")
+
+
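For illustration only (an editor's sketch, not part of the package source): what the hashability check above guards against; a default object repr embeds a memory address, which would make the config hash differ between processes.

class _Example:  # hypothetical class used only for this illustration
    pass

contains_object_print(str(_Example()))        # True: "<... object at 0x7f...>" matches
contains_object_print("max_model_len=4096")   # False: a plain value is safe to hash
assert_hashable("max_model_len=4096")         # returns True
# assert_hashable(str(_Example())) would raise an AssertionError instead.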
+T = TypeVar("T")
+
+
+def get_layers_from_vllm_config(vllm_config: VllmConfig,
+                                layer_type: type[T]) -> dict[str, T]:
+    return {
+        layer_name: layer
+        for layer_name, layer in
+        vllm_config.compilation_config.static_forward_context.items()
+        if isinstance(layer, layer_type)
+    }