vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
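Since a wheel is an ordinary zip archive, a file listing like the one below can be reproduced locally with Python's standard-library zipfile module. The following is a minimal sketch, assuming the wheel has already been downloaded under the canonical filename shown in the title; it is illustrative only and not part of the published diff data.

    import zipfile

    # Assumed local path to the downloaded wheel (matches the title above).
    WHEEL_PATH = "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

    # A .whl file is a zip archive, so infolist() yields every packaged file.
    with zipfile.ZipFile(WHEEL_PATH) as whl:
        for info in whl.infolist():
            # file_size is the uncompressed size in bytes.
            print(f"{info.filename}  ({info.file_size} bytes)")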

Potentially problematic release.


This version of vllm-cpu might be problematic.

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
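Note on the tuned-kernel JSON files listed above (under vllm/model_executor/layers/quantization/utils/configs/): each filename encodes its own lookup key, i.e. what appear to be the problem dimensions N and K, the GPU device name, the quantization dtype, and the block shape. A minimal parsing sketch, illustrative only; the helper name below is hypothetical and not part of the package:

    import os
    import re

    def parse_tuned_config_filename(path: str) -> dict[str, str]:
        """Split a name like 'N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,
        dtype=fp8_w8a8,block_shape=[128,128].json' into its key=value parts."""
        name = os.path.basename(path)
        if name.endswith(".json"):
            name = name[:-len(".json")]
        fields = {}
        # Split on commas that are not inside the [...] of block_shape.
        for part in re.split(r",(?![^\[]*\])", name):
            key, value = part.split("=", 1)
            fields[key] = value
        return fields

    print(parse_tuned_config_filename(
        "N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,"
        "dtype=fp8_w8a8,block_shape=[128,128].json"))
    # {'N': '4096', 'K': '512', 'device_name': 'NVIDIA_H100_80GB_HBM3',
    #  'dtype': 'fp8_w8a8', 'block_shape': '[128,128]'}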
vllm/config.py ADDED
@@ -0,0 +1,4179 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import ast
+ import copy
+ import enum
+ import hashlib
+ import inspect
+ import json
+ import re
+ import sys
+ import textwrap
+ import warnings
+ from collections import Counter
+ from contextlib import contextmanager
+ from dataclasses import (MISSING, dataclass, field, fields, is_dataclass,
+                          replace)
+ from importlib.util import find_spec
+ from pathlib import Path
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Final, Literal,
+                     Optional, Protocol, TypeVar, Union, get_args)
+
+ import torch
+ from pydantic import BaseModel, Field, PrivateAttr
+ from torch.distributed import ProcessGroup, ReduceOp
+ from transformers import PretrainedConfig
+
+ import vllm.envs as envs
+ from vllm.compilation.inductor_pass import CallableInductorPass, InductorPass
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS,
+                                                      QuantizationMethods,
+                                                      get_quantization_config)
+ from vllm.model_executor.models import ModelRegistry
+ from vllm.platforms import CpuArchEnum, current_platform
+ from vllm.sampling_params import GuidedDecodingParams
+ from vllm.tracing import is_otel_available, otel_import_error_traceback
+ from vllm.transformers_utils.config import (
+     ConfigFormat, get_config, get_hf_image_processor_config,
+     get_hf_text_config, get_pooling_config,
+     get_sentence_transformer_tokenizer_config, is_encoder_decoder,
+     try_get_generation_config, uses_mrope)
+ from vllm.transformers_utils.s3_utils import S3Model
+ from vllm.transformers_utils.utils import is_s3, maybe_model_redirect
+ from vllm.utils import (GiB_bytes, LayerBlockType, cuda_device_count_stateless,
+                         get_cpu_memory, get_open_port, is_torch_equal_or_newer,
+                         random_uuid, resolve_obj_by_qualname)
+
+ if TYPE_CHECKING:
+     from _typeshed import DataclassInstance
+     from ray.util.placement_group import PlacementGroup
+
+     from vllm.executor.executor_base import ExecutorBase
+     from vllm.model_executor.layers.quantization.base_config import (
+         QuantizationConfig)
+     from vllm.model_executor.model_loader.loader import BaseModelLoader
+
+     ConfigType = type[DataclassInstance]
+ else:
+     QuantizationConfig = None
+     ConfigType = type
+
+ logger = init_logger(__name__)
+
+ ConfigT = TypeVar("ConfigT", bound=ConfigType)
+
+ # This value is chosen to have a balance between ITL and TTFT. Note it is
+ # not optimized for throughput.
+ _DEFAULT_MAX_NUM_BATCHED_TOKENS = 2048
+ _POOLING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768
+ _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS = 5120
+
+ TaskOption = Literal["auto", "generate", "embedding", "embed", "classify",
+                      "score", "reward", "transcription"]
+
+ _ResolvedTask = Literal["generate", "embed", "classify", "score", "reward",
+                         "draft", "transcription"]
+
+ RunnerType = Literal["generate", "pooling", "draft", "transcription"]
+
+ _RUNNER_TASKS: dict[RunnerType, list[_ResolvedTask]] = {
+     "generate": ["generate"],
+     "pooling": ["embed", "classify", "score", "reward"],
+     "draft": ["draft"],
+     "transcription": ["transcription"],
+ }
+
+ _TASK_RUNNER: dict[_ResolvedTask, RunnerType] = {
+     task: runner
+     for runner, tasks in _RUNNER_TASKS.items()
+     for task in tasks
+ }
+
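For orientation, the comprehension above simply inverts `_RUNNER_TASKS`, mapping each resolved task to its runner type. Evaluating it yields the mapping below (illustrative sketch only, not part of the packaged file; it assumes the definitions above are in scope):

    assert _TASK_RUNNER == {
        "generate": "generate",
        "embed": "pooling",
        "classify": "pooling",
        "score": "pooling",
        "reward": "pooling",
        "draft": "draft",
        "transcription": "transcription",
    }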
+ HfOverrides = Union[dict[str, Any], Callable[[PretrainedConfig],
+                                               PretrainedConfig]]
+
+
+ class SupportsHash(Protocol):
+
+     def compute_hash(self) -> str:
+         ...
+
+
+ class SupportsMetricsInfo(Protocol):
+
+     def metrics_info(self) -> dict[str, str]:
+         ...
+
+
+ class ModelImpl(str, enum.Enum):
+     AUTO = "auto"
+     VLLM = "vllm"
+     TRANSFORMERS = "transformers"
+
+
+ def get_attr_docs(cls: type[Any]) -> dict[str, str]:
+     """
+     Get any docstrings placed after attribute assignments in a class body.
+
+     https://davidism.com/mit-license/
+     """
+
+     def pairwise(iterable):
+         """
+         Manually implement https://docs.python.org/3/library/itertools.html#itertools.pairwise
+
+         Can be removed when Python 3.9 support is dropped.
+         """
+         iterator = iter(iterable)
+         a = next(iterator, None)
+
+         for b in iterator:
+             yield a, b
+             a = b
+
+     cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
+
+     if not isinstance(cls_node, ast.ClassDef):
+         raise TypeError("Given object was not a class.")
+
+     out = {}
+
+     # Consider each pair of nodes.
+     for a, b in pairwise(cls_node.body):
+         # Must be an assignment then a constant string.
+         if (not isinstance(a, (ast.Assign, ast.AnnAssign))
+                 or not isinstance(b, ast.Expr)
+                 or not isinstance(b.value, ast.Constant)
+                 or not isinstance(b.value.value, str)):
+             continue
+
+         doc = inspect.cleandoc(b.value.value)
+
+         # An assignment can have multiple targets (a = b = v), but an
+         # annotated assignment only has one target.
+         targets = a.targets if isinstance(a, ast.Assign) else [a.target]
+
+         for target in targets:
+             # Must be assigning to a plain name.
+             if not isinstance(target, ast.Name):
+                 continue
+
+             out[target.id] = doc
+
+     return out
+
+
+ def config(cls: ConfigT) -> ConfigT:
+     """
+     A decorator that ensures all fields in a dataclass have default values
+     and that each field has a docstring.
+     """
+     if not is_dataclass(cls):
+         raise TypeError("The decorated class must be a dataclass.")
+     attr_docs = get_attr_docs(cls)
+     for f in fields(cls):
+         if f.init and f.default is MISSING and f.default_factory is MISSING:
+             raise ValueError(
+                 f"Field '{f.name}' in {cls.__name__} must have a default value."
+             )
+         if f.name not in attr_docs:
+             raise ValueError(
+                 f"Field '{f.name}' in {cls.__name__} must have a docstring.")
+     return cls
+
+
+ def get_field(cls: ConfigType, name: str) -> Field:
+     """Get the default factory field of a dataclass by name. Used for getting
+     default factory fields in `EngineArgs`."""
+     if not is_dataclass(cls):
+         raise TypeError("The given class is not a dataclass.")
+     cls_fields = {f.name: f for f in fields(cls)}
+     if name not in cls_fields:
+         raise ValueError(f"Field '{name}' not found in {cls.__name__}.")
+     named_field: Field = cls_fields.get(name)
+     if (default_factory := named_field.default_factory) is not MISSING:
+         return field(default_factory=default_factory)
+     if (default := named_field.default) is not MISSING:
+         return field(default=default)
+     raise ValueError(
+         f"{cls.__name__}.{name} must have a default value or default factory.")
+
+
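For context on how the three helpers above fit together: `@config` is stacked on top of `@dataclass`, `get_attr_docs` collects the string literals placed directly after each field, and `get_field` lets another dataclass reuse a field's default. A minimal sketch, assuming `config`, `get_attr_docs`, and `get_field` are importable from vllm.config as defined above; the `DemoConfig` class is hypothetical and not part of the packaged file:

    from dataclasses import dataclass, field

    from vllm.config import config, get_attr_docs, get_field

    @config
    @dataclass
    class DemoConfig:
        block_size: int = 16
        """Every field needs a default; this docstring is what @config checks."""

        allowed_dtypes: list[str] = field(default_factory=lambda: ["auto"])
        """Fields without a plain default must supply a default_factory."""

    # get_attr_docs(DemoConfig) -> {"block_size": "...", "allowed_dtypes": "..."}
    # get_field(DemoConfig, "allowed_dtypes") returns a dataclasses.field() that
    # re-exposes the default_factory so another dataclass (e.g. engine argument
    # containers) can reuse it as its own field default.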
203
+ class ModelConfig:
204
+ """Configuration for the model.
205
+
206
+ Args:
207
+ model: Name or path of the huggingface model to use.
208
+ It is also used as the content for `model_name` tag in metrics
209
+ output when `served_model_name` is not specified.
210
+ task: The task to use the model for. Each vLLM instance only supports
211
+ one task, even if the same model can be used for multiple tasks.
212
+ When the model only supports one task, "auto" can be used to select
213
+ it; otherwise, you must specify explicitly which task to use.
214
+ tokenizer: Name or path of the huggingface tokenizer to use.
215
+ tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
216
+ available, "slow" will always use the slow tokenizer,
217
+ "mistral" will always use the tokenizer from `mistral_common`, and
218
+ "custom" will use --tokenizer to select the preregistered tokenizer.
219
+ trust_remote_code: Trust remote code (e.g., from HuggingFace) when
220
+ downloading the model and tokenizer.
221
+ allowed_local_media_path: Allowing API requests to read local images or
222
+ videos from directories specified by the server file system.
223
+ This is a security risk. Should only be enabled in trusted
224
+ environments.
225
+ dtype: Data type for model weights and activations. The "auto" option
226
+ will use FP16 precision for FP32 and FP16 models, and BF16 precision
227
+ for BF16 models.
228
+ seed: Random seed for reproducibility.
229
+ revision: The specific model version to use. It can be a branch name,
230
+ a tag name, or a commit id. If unspecified, will use the default
231
+ version.
232
+ code_revision: The specific revision to use for the model code on
233
+ Hugging Face Hub. It can be a branch name, a tag name, or a
234
+ commit id. If unspecified, will use the default version.
235
+ tokenizer_revision: The specific tokenizer version to use. It can be a
236
+ branch name, a tag name, or a commit id. If unspecified, will use
237
+ the default version.
238
+ max_model_len: Maximum length of a sequence (including prompt and
239
+ output). If None, will be derived from the model.
240
+ spec_target_max_model_len: Specify the the maximum length for spec
241
+ decoding draft models.
242
+ quantization: Quantization method that was used to quantize the model
243
+ weights. If None, we assume the model weights are not quantized.
244
+ enforce_eager: Whether to enforce eager execution. If True, we will
245
+ disable CUDA graph and always execute the model in eager mode.
246
+ If False, we will use CUDA graph and eager execution in hybrid.
247
+ If None, the user did not specify, so default to False.
248
+ max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
249
+ When a sequence has context length larger than this, we fall back
250
+ to eager mode. Additionally for encoder-decoder models, if the
251
+ sequence length of the encoder input is larger than this, we fall
252
+ back to the eager mode.
253
+ max_logprobs: Maximum number of log probabilities. Defaults to 20.
254
+ disable_sliding_window: Whether to disable sliding window. If True,
255
+ we will disable the sliding window functionality of the model.
256
+ If the model does not support sliding window, this argument is
257
+ ignored.
258
+ skip_tokenizer_init: If true, skip initialization of tokenizer and
259
+ detokenizer.
260
+ served_model_name: The model name used in metrics tag `model_name`,
261
+ matches the model name exposed via the APIs. If multiple model
262
+ names provided, the first name will be used. If not specified,
263
+ the model name will be the same as `model`.
264
+ limit_mm_per_prompt: Maximum number of data items per modality
265
+ per prompt. Only applicable for multimodal models.
266
+ use_async_output_proc: Whether to use async output processor.
267
+ Defaults to True.
268
+ config_format: The config format which shall be loaded.
269
+ Defaults to 'auto' which defaults to 'hf'.
270
+ hf_token: The token to use as HTTP bearer authorization for remote files
271
+ . If `True`, will use the token generated when running
272
+ `huggingface-cli login` (stored in `~/.huggingface`).
273
+ hf_overrides: If a dictionary, contains arguments to be forwarded to the
274
+ HuggingFace config. If a callable, it is called to update the
275
+ HuggingFace config.
276
+ mm_processor_kwargs: Arguments to be forwarded to the model's processor
277
+ for multi-modal data, e.g., image processor.
278
+ disable_mm_preprocessor_cache: If true, then disables caching of the
279
+ multi-modal preprocessor/mapper. (not recommended)
280
+ override_neuron_config: Initialize non default neuron config or
281
+ override default neuron config that are specific to Neuron devices,
282
+ this argument will be used to configure the neuron config that
283
+ can not be gathered from the vllm arguments.
284
+ override_pooler_config: Initialize non default pooling config or
285
+ override default pooling config for the pooling model.
286
+ logits_processor_pattern: Optional regex pattern specifying valid
287
+ logits processor qualified names that can be passed with the
288
+ `logits_processors` extra completion argument. Defaults to None,
289
+ which allows no processors.
290
+ generation_config: Configuration parameter file for generation.
291
+ model_impl: Which implementation of the model to use:
292
+ "auto" will try to use the vLLM implementation if it exists and
293
+ fall back to the Transformers implementation if no vLLM
294
+ implementation is available.
295
+ "vllm" will use the vLLM model implementation.
296
+ "transformers" will use the Transformers model implementation.
297
+ override_generation_config: Override the generation config with the
298
+ given config.
299
+ """
300
+
301
+ def compute_hash(self) -> str:
302
+ """
303
+ WARNING: Whenever a new field is added to this config,
304
+ ensure that it is included in the factors list if
305
+ it affects the computation graph.
306
+
307
+ Provide a hash that uniquely identifies all the configs
308
+ that affect the structure of the computation
309
+ graph from input ids/embeddings to the final hidden states,
310
+ excluding anything before input ids/embeddings and after
311
+ the final hidden states.
312
+ """
313
+ factors: list[Any] = []
314
+ factors.append(self.model)
315
+ factors.append(self.dtype)
316
+ factors.append(self.quantization)
317
+ factors.append(self.revision)
318
+ factors.append(self.code_revision)
319
+ factors.append(self.max_model_len)
320
+ factors.append(self.max_logprobs)
321
+ factors.append(self.disable_sliding_window)
322
+ factors.append(self.trust_remote_code)
323
+ factors.append(self.mm_processor_kwargs)
324
+ factors.append(self.generation_config)
325
+ factors.append(self.model_impl)
326
+ factors.append(self.override_generation_config)
327
+ factors.append(self.rope_scaling)
328
+ factors.append(self.rope_theta)
329
+ # hf_config can control how the model looks!
330
+ factors.append(self.hf_config.to_json_string())
331
+ str_factors = str(factors)
332
+ assert_hashable(str_factors)
333
+ return hashlib.sha256(str(factors).encode()).hexdigest()
334
+
335
+ def __init__(
336
+ self,
337
+ model: str,
338
+ task: Union[TaskOption, Literal["draft"]],
339
+ tokenizer: str,
340
+ tokenizer_mode: str,
341
+ trust_remote_code: bool,
342
+ dtype: Union[str, torch.dtype],
343
+ seed: int,
344
+ hf_config_path: Optional[str] = None,
345
+ allowed_local_media_path: str = "",
346
+ revision: Optional[str] = None,
347
+ code_revision: Optional[str] = None,
348
+ rope_scaling: Optional[dict[str, Any]] = None,
349
+ rope_theta: Optional[float] = None,
350
+ tokenizer_revision: Optional[str] = None,
351
+ max_model_len: Optional[int] = None,
352
+ spec_target_max_model_len: Optional[int] = None,
353
+ quantization: Optional[str] = None,
354
+ enforce_eager: Optional[bool] = None,
355
+ max_seq_len_to_capture: Optional[int] = None,
356
+ max_logprobs: int = 20,
357
+ disable_sliding_window: bool = False,
358
+ disable_cascade_attn: bool = False,
359
+ skip_tokenizer_init: bool = False,
360
+ served_model_name: Optional[Union[str, list[str]]] = None,
361
+ limit_mm_per_prompt: Optional[dict[str, int]] = None,
362
+ use_async_output_proc: bool = True,
363
+ config_format: ConfigFormat = ConfigFormat.AUTO,
364
+ hf_token: Optional[Union[bool, str]] = None,
365
+ hf_overrides: Optional[HfOverrides] = None,
366
+ mm_processor_kwargs: Optional[dict[str, Any]] = None,
367
+ disable_mm_preprocessor_cache: bool = False,
368
+ override_neuron_config: Optional[dict[str, Any]] = None,
369
+ override_pooler_config: Optional["PoolerConfig"] = None,
370
+ logits_processor_pattern: Optional[str] = None,
371
+ generation_config: str = "auto",
372
+ enable_sleep_mode: bool = False,
373
+ override_generation_config: Optional[dict[str, Any]] = None,
374
+ model_impl: Union[str, ModelImpl] = ModelImpl.AUTO,
375
+ ) -> None:
376
+ self.model = maybe_model_redirect(model)
377
+ self.tokenizer = maybe_model_redirect(tokenizer)
378
+
379
+ self.hf_config_path = hf_config_path
380
+ if isinstance(hf_config_path, str):
381
+ self.hf_config_path = maybe_model_redirect(hf_config_path)
382
+
383
+ self.tokenizer_mode = tokenizer_mode
384
+ self.trust_remote_code = trust_remote_code
385
+ self.allowed_local_media_path = allowed_local_media_path
386
+ self.seed = seed
387
+ self.revision = revision
388
+ self.code_revision = code_revision
389
+ self.rope_scaling = rope_scaling
390
+ self.rope_theta = rope_theta
391
+ self.model_impl = model_impl
392
+
393
+ if hf_overrides is None:
394
+ hf_overrides = {}
395
+
396
+ if callable(hf_overrides):
397
+ hf_overrides_kw = {}
398
+ hf_overrides_fn = hf_overrides
399
+ else:
400
+ hf_overrides_kw = hf_overrides
401
+ hf_overrides_fn = None
402
+
403
+ if rope_scaling is not None:
404
+ hf_override: dict[str, Any] = {"rope_scaling": rope_scaling}
405
+ hf_overrides_kw.update(hf_override)
406
+ hf_overrides_str = json.dumps(hf_overrides)
407
+ msg = (
408
+ "`--rope-scaling` will be removed in a future release. "
409
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
410
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
411
+ if rope_theta is not None:
412
+ hf_override = {"rope_theta": rope_theta}
413
+ hf_overrides_kw.update(hf_override)
414
+ hf_overrides_str = json.dumps(hf_overrides)
415
+ msg = (
416
+ "`--rope-theta` will be removed in a future release. "
417
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
418
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
419
+
420
+ self.maybe_pull_model_tokenizer_for_s3(model, tokenizer)
421
+
422
+ if (backend := envs.VLLM_ATTENTION_BACKEND
423
+ ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
424
+ raise ValueError(
425
+ "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
426
+ "module was not found. See "
427
+ "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501
428
+ "for instructions on how to install it.")
429
+
430
+ # The tokenizer version is consistent with the model version by default.
431
+ if tokenizer_revision is None:
432
+ self.tokenizer_revision = revision
433
+ else:
434
+ self.tokenizer_revision = tokenizer_revision
435
+ self.quantization = quantization
436
+ self.enforce_eager = enforce_eager
437
+ self.max_seq_len_to_capture = max_seq_len_to_capture
438
+ self.max_logprobs = max_logprobs
439
+ self.disable_sliding_window = disable_sliding_window
440
+ self.disable_cascade_attn = disable_cascade_attn
441
+ self.skip_tokenizer_init = skip_tokenizer_init
442
+ self.enable_sleep_mode = enable_sleep_mode
443
+
444
+ from vllm.platforms import current_platform
445
+
446
+ if (self.enable_sleep_mode
447
+ and not current_platform.is_sleep_mode_available()):
448
+ raise ValueError(
449
+ "Sleep mode is not supported on current platform.")
450
+
451
+ hf_config = get_config(self.hf_config_path or self.model,
452
+ trust_remote_code, revision, code_revision,
453
+ config_format)
454
+
455
+ if hf_overrides_kw:
456
+ logger.info("Overriding HF config with %s", hf_overrides_kw)
457
+ hf_config.update(hf_overrides_kw)
458
+ if hf_overrides_fn:
459
+ logger.info("Overriding HF config with %s", hf_overrides_fn)
460
+ hf_config = hf_overrides_fn(hf_config)
461
+
462
+ self.hf_config = hf_config
463
+
464
+ self.hf_text_config = get_hf_text_config(self.hf_config)
465
+ self.attention_chunk_size = getattr(self.hf_text_config,
466
+ "attention_chunk_size", None)
467
+ self.encoder_config = self._get_encoder_config()
468
+ self.hf_image_processor_config = get_hf_image_processor_config(
469
+ self.model, hf_token=hf_token, revision=revision)
470
+ self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
471
+ self.use_async_output_proc = use_async_output_proc
472
+ self.mm_processor_kwargs = mm_processor_kwargs
473
+ self.disable_mm_preprocessor_cache = disable_mm_preprocessor_cache
474
+
475
+ # Set enforce_eager to False if the value is unset.
476
+ if self.enforce_eager is None:
477
+ self.enforce_eager = False
478
+
479
+ interleaved_attn_models = ["gemma2", "gemma3_text", "cohere2"]
480
+ sliding_window = getattr(self.hf_text_config, "sliding_window", None)
481
+ has_interleaved_attention = (sliding_window is not None) and (
482
+ isinstance(sliding_window, list) or
483
+ (self.hf_text_config.model_type in interleaved_attn_models))
484
+
485
+ if (not self.disable_sliding_window and has_interleaved_attention):
486
+ if (backend :=
487
+ envs.VLLM_ATTENTION_BACKEND) in ("XFORMERS", "FLASHINFER"):
488
+ sliding_window_len_min = get_min_sliding_window(
489
+ self.hf_text_config.sliding_window)
490
+
491
+ logger.warning_once(
492
+ f"{self.hf_text_config.model_type} has interleaved "
493
+ "attention, which is currently not supported by the "
494
+ f"{backend} backend. Disabling sliding window and capping "
495
+ "the max length to the sliding window size "
496
+ f"({sliding_window_len_min}).")
497
+ self.disable_sliding_window = True
498
+ else:
499
+ # for a model with interleaved attention,
500
+ # the scheduler and the model treat it as full attention
501
+ # (i.e., not dropping any tokens outside the window).
502
+ # only the attention layer itself is aware of the sliding
503
+ # window, and use the window size to compute the attention.
504
+ self.hf_text_config.interleaved_sliding_window = sliding_window
505
+ delattr(self.hf_text_config, "sliding_window")
506
+ sliding_window = None
507
+
508
+ self.max_model_len = _get_and_verify_max_len(
509
+ hf_config=self.hf_text_config,
510
+ max_model_len=max_model_len,
511
+ disable_sliding_window=self.disable_sliding_window,
512
+ sliding_window_len=self.get_hf_config_sliding_window(),
513
+ spec_target_max_model_len=spec_target_max_model_len,
514
+ encoder_config=self.encoder_config)
515
+ self.served_model_name = get_served_model_name(model,
516
+ served_model_name)
517
+ self.multimodal_config = self._init_multimodal_config(
518
+ limit_mm_per_prompt)
519
+ if not self.skip_tokenizer_init:
520
+ self._verify_tokenizer_mode()
521
+
522
+ self.is_attention_free = self._init_attention_free()
523
+ self.is_hybrid = self._init_is_hybrid()
524
+ self.has_noops = self._init_has_noops()
525
+ self.has_inner_state = self._init_has_inner_state()
526
+
527
+ if current_platform.is_neuron():
528
+ self.override_neuron_config = override_neuron_config
529
+ else:
530
+ self.override_neuron_config = None
531
+
532
+ supported_tasks, task = self._resolve_task(task)
533
+ self.supported_tasks = supported_tasks
534
+ self.task: Final = task
535
+ if self.task in ("draft", "generate"):
536
+ self.truncation_side = "left"
537
+ else:
538
+ self.truncation_side = "right"
539
+
540
+ self.pooler_config = self._init_pooler_config(override_pooler_config)
541
+ self.logits_processor_pattern = logits_processor_pattern
542
+
543
+ self.generation_config = generation_config
544
+ self.override_generation_config = override_generation_config or {}
545
+
546
+ self._verify_quantization()
547
+ self._verify_cuda_graph()
548
+ self._verify_bnb_config()
549
+
550
+ @property
551
+ def registry(self):
552
+ return ModelRegistry
553
+
554
+ @property
555
+ def architectures(self) -> list[str]:
556
+ return getattr(self.hf_config, "architectures", [])
557
+
558
+ def maybe_pull_model_tokenizer_for_s3(self, model: str,
559
+ tokenizer: str) -> None:
560
+ """
561
+ Pull the model config or tokenizer to a temporary
562
+ directory in case of S3.
563
+
564
+ Args:
565
+ model: The model name or path.
566
+ tokenizer: The tokenizer name or path.
567
+
568
+ """
569
+ if is_s3(model) or is_s3(tokenizer):
570
+ if is_s3(model):
571
+ s3_model = S3Model()
572
+ s3_model.pull_files(
573
+ model, allow_pattern=["*.model", "*.py", "*.json"])
574
+ self.model_weights = self.model
575
+ self.model = s3_model.dir
576
+
577
+ if is_s3(tokenizer):
578
+ s3_tokenizer = S3Model()
579
+ s3_tokenizer.pull_files(
580
+ tokenizer, ignore_pattern=["*.pt", "*.safetensors", "*.bin"])
581
+ self.tokenizer = s3_tokenizer.dir
582
+
583
+ def _init_multimodal_config(
584
+ self, limit_mm_per_prompt: Optional[dict[str, int]]
585
+ ) -> Optional["MultiModalConfig"]:
586
+ if self.registry.is_multimodal_model(self.architectures):
587
+ return MultiModalConfig(limit_per_prompt=limit_mm_per_prompt or {})
588
+
589
+ if limit_mm_per_prompt:
590
+ raise ValueError("`limit_mm_per_prompt` is only supported for "
591
+ "multimodal models.")
592
+
593
+ return None
594
+
595
+ def _get_encoder_config(self):
596
+ return get_sentence_transformer_tokenizer_config(
597
+ self.model, self.revision)
598
+
599
+ def _init_pooler_config(
600
+ self,
601
+ override_pooler_config: Optional["PoolerConfig"],
602
+ ) -> Optional["PoolerConfig"]:
603
+
604
+ if self.runner_type == "pooling":
605
+ user_config = override_pooler_config or PoolerConfig()
606
+
607
+ base_config = get_pooling_config(self.model, self.revision)
608
+ if base_config is not None:
609
+ # Only set values that are not overridden by the user
610
+ for k, v in base_config.items():
611
+ if getattr(user_config, k) is None:
612
+ setattr(user_config, k, v)
613
+
614
+ if self.is_matryoshka:
615
+ if user_config.normalize is None:
616
+ user_config.normalize = True
617
+ elif not user_config.normalize:
618
+ raise ValueError(
619
+ "`normalize` must be enabled (set to True) "
620
+ "for models that are compatible with "
621
+ "Matryoshka Representation.")
622
+
623
+ return user_config
624
+
625
+ return None
626
+
627
+ def _init_attention_free(self) -> bool:
628
+ return self.registry.is_attention_free_model(self.architectures)
629
+
630
+ def _init_is_hybrid(self) -> bool:
631
+ return self.registry.is_hybrid_model(self.architectures)
632
+
633
+ def _init_has_noops(self) -> bool:
634
+ architectures = getattr(self.hf_config, "architectures", [])
635
+ return self.registry.is_noops_model(architectures)
636
+
637
+ def _init_has_inner_state(self) -> bool:
638
+ return self.registry.model_has_inner_state(self.architectures)
639
+
640
+ def _verify_tokenizer_mode(self) -> None:
641
+ tokenizer_mode = self.tokenizer_mode.lower()
642
+ if tokenizer_mode not in ["auto", "slow", "mistral", "custom"]:
643
+ raise ValueError(
644
+ f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
645
+ "either 'auto', 'slow', 'mistral' or 'custom'.")
646
+ self.tokenizer_mode = tokenizer_mode
647
+
648
+ def _get_preferred_task(
649
+ self,
650
+ architectures: list[str],
651
+ supported_tasks: set[_ResolvedTask],
652
+ ) -> Optional[_ResolvedTask]:
653
+ model_id = self.model
654
+ if get_pooling_config(model_id, self.revision):
655
+ return "embed"
656
+ if self.registry.is_cross_encoder_model(architectures):
657
+ return "score"
658
+ if self.registry.is_transcription_model(architectures):
659
+ return "transcription"
660
+
661
+ suffix_to_preferred_task: list[tuple[str, _ResolvedTask]] = [
662
+ # Other models follow this pattern
663
+ ("ForCausalLM", "generate"),
664
+ ("ForConditionalGeneration", "generate"),
665
+ ("ForSequenceClassification", "classify"),
666
+ ("ChatModel", "generate"),
667
+ ("LMHeadModel", "generate"),
668
+ ("EmbeddingModel", "embed"),
669
+ ("RewardModel", "reward"),
670
+ ]
671
+ _, arch = self.registry.inspect_model_cls(architectures)
672
+
673
+ for suffix, pref_task in suffix_to_preferred_task:
674
+ if arch.endswith(suffix) and pref_task in supported_tasks:
675
+ return pref_task
676
+
677
+ return None
678
+
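# Illustrative sketch (editor's note, not part of the package diff): how the
# suffix table in _get_preferred_task maps an architecture name to a task.
# The architecture strings and supported-task sets below are hypothetical.
_suffix_to_preferred_task = [
    ("ForCausalLM", "generate"),
    ("ForSequenceClassification", "classify"),
    ("EmbeddingModel", "embed"),
]

def _preferred_task(arch: str, supported: set):
    for suffix, task in _suffix_to_preferred_task:
        if arch.endswith(suffix) and task in supported:
            return task
    return None

assert _preferred_task("LlamaForCausalLM", {"generate", "classify"}) == "generate"
assert _preferred_task("BertModel", {"embed"}) is None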
679
+ def _resolve_task(
680
+ self,
681
+ task_option: Union[TaskOption, Literal["draft"]],
682
+ ) -> tuple[set[_ResolvedTask], _ResolvedTask]:
683
+ if task_option == "draft":
684
+ return {"draft"}, "draft"
685
+
686
+ registry = self.registry
687
+ architectures = self.architectures
688
+
689
+ runner_support: dict[RunnerType, bool] = {
690
+ # NOTE: Listed from highest to lowest priority,
691
+ # in case the model supports multiple of them
692
+ "transcription": registry.is_transcription_model(architectures),
693
+ "generate": registry.is_text_generation_model(architectures),
694
+ "pooling": registry.is_pooling_model(architectures),
695
+ }
696
+ supported_runner_types_lst: list[RunnerType] = [
697
+ runner_type
698
+ for runner_type, is_supported in runner_support.items()
699
+ if is_supported
700
+ ]
701
+
702
+ supported_tasks_lst: list[_ResolvedTask] = [
703
+ task for runner_type in supported_runner_types_lst
704
+ for task in _RUNNER_TASKS[runner_type]
705
+ ]
706
+ supported_tasks = set(supported_tasks_lst)
707
+
708
+ if task_option == "auto":
709
+ selected_task = next(iter(supported_tasks_lst))
710
+
711
+ if len(supported_tasks_lst) > 1:
712
+ preferred_task = self._get_preferred_task(
713
+ architectures, supported_tasks)
714
+ if preferred_task is not None:
715
+ selected_task = preferred_task
716
+
717
+ logger.info(
718
+ "This model supports multiple tasks: %s. "
719
+ "Defaulting to '%s'.", supported_tasks, selected_task)
720
+ else:
721
+ # Aliases
722
+ if task_option == "embedding":
723
+ preferred_task = self._get_preferred_task(
724
+ architectures, supported_tasks)
725
+ if preferred_task != "embed":
726
+ msg = ("The 'embedding' task will be restricted to "
727
+ "embedding models in a future release. Please "
728
+ "pass `--task classify`, `--task score`, or "
729
+ "`--task reward` explicitly for other pooling "
730
+ "models.")
731
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
732
+
733
+ task_option = preferred_task or "embed"
734
+
735
+ if task_option not in supported_tasks:
736
+ msg = (
737
+ f"This model does not support the '{task_option}' task. "
738
+ f"Supported tasks: {supported_tasks}")
739
+ raise ValueError(msg)
740
+
741
+ selected_task = task_option
742
+
743
+ return supported_tasks, selected_task
744
+
745
+ def _parse_quant_hf_config(self):
746
+ quant_cfg = getattr(self.hf_config, "quantization_config", None)
747
+ if quant_cfg is None:
748
+ # compressed-tensors uses a "compression_config" key
749
+ quant_cfg = getattr(self.hf_config, "compression_config", None)
750
+ return quant_cfg
751
+
752
+ def _verify_quantization(self) -> None:
753
+ supported_quantization = QUANTIZATION_METHODS
754
+ optimized_quantization_methods = [
755
+ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin",
756
+ "awq_marlin", "fbgemm_fp8", "compressed-tensors", "experts_int8",
757
+ "quark", "nvfp4", "bitblas", "gptq_bitblas"
758
+ ]
759
+ if self.quantization is not None:
760
+ self.quantization = self.quantization.lower()
761
+
762
+ # Parse quantization method from the HF model config, if available.
763
+ quant_cfg = self._parse_quant_hf_config()
764
+
765
+ if quant_cfg is not None:
766
+ quant_method = quant_cfg.get("quant_method", "").lower()
767
+ quant_method = quant_method.replace("compressed_tensors",
768
+ "compressed-tensors")
769
+ quant_cfg["quant_method"] = quant_method
770
+
771
+ # Quantization methods which are overrides (i.e. they have an
772
+ # `override_quantization_method` method) must be checked in order
773
+ # of preference (this is particularly important for GPTQ).
774
+ overrides = [
775
+ "marlin",
776
+ "bitblas",
777
+ "gptq_marlin_24",
778
+ "gptq_marlin",
779
+ "gptq_bitblas",
780
+ "awq_marlin",
781
+ "ipex",
782
+ "moe_wna16",
783
+ ]
784
+ quantization_methods = [
785
+ q for q in supported_quantization if q not in overrides
786
+ ]
787
+ # Any custom overrides will be in quantization_methods so we place
788
+ # them at the start of the list so custom overrides have preference
789
+ # over the built in ones.
790
+ quantization_methods = quantization_methods + overrides
791
+
792
+ # Detect which checkpoint it is
793
+ for name in quantization_methods:
794
+ method = get_quantization_config(name)
795
+ quantization_override = method.override_quantization_method(
796
+ quant_cfg, self.quantization)
797
+ if quantization_override is not None:
798
+ # Raise error if the override is not custom (custom would
799
+ # be in QUANTIZATION_METHODS but not QuantizationMethods)
800
+ # and hasn't been added to the overrides list.
801
+ if (name in get_args(QuantizationMethods)
802
+ and name not in overrides):
803
+ raise ValueError(
804
+ f"Quantization method {name} is an override but "
805
+ "is has not been added to the `overrides` list "
806
+ "above. This is necessary to ensure that the "
807
+ "overrides are checked in order of preference.")
808
+ quant_method = quantization_override
809
+ self.quantization = quantization_override
810
+ break
811
+
812
+ # Verify quantization configurations.
813
+ if self.quantization is None:
814
+ self.quantization = quant_method
815
+ elif self.quantization != quant_method:
816
+ raise ValueError(
817
+ "Quantization method specified in the model config "
818
+ f"({quant_method}) does not match the quantization "
819
+ f"method specified in the `quantization` argument "
820
+ f"({self.quantization}).")
821
+
822
+ if self.quantization is not None:
823
+ if self.quantization not in supported_quantization:
824
+ raise ValueError(
825
+ f"Unknown quantization method: {self.quantization}. Must "
826
+ f"be one of {supported_quantization}.")
827
+ from vllm.platforms import current_platform
828
+ current_platform.verify_quantization(self.quantization)
829
+ if self.quantization not in optimized_quantization_methods:
830
+ logger.warning(
831
+ "%s quantization is not fully "
832
+ "optimized yet. The speed can be slower than "
833
+ "non-quantized models.", self.quantization)
834
+
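# Illustrative sketch (editor's note, not part of the package diff): the
# override loop in _verify_quantization walks candidate methods in priority
# order and keeps the first one that claims the checkpoint. The candidate
# table and override functions below are made up for illustration.
def _pick_quant_method(quant_cfg: dict, candidates) -> str:
    for _name, override_fn in candidates:
        chosen = override_fn(quant_cfg)
        if chosen is not None:
            return chosen
    return quant_cfg["quant_method"]

_candidates = [
    ("gptq_marlin", lambda cfg: "gptq_marlin" if cfg["quant_method"] == "gptq" else None),
    ("awq_marlin", lambda cfg: "awq_marlin" if cfg["quant_method"] == "awq" else None),
]
# A GPTQ checkpoint is upgraded to the optimized gptq_marlin kernel first.
assert _pick_quant_method({"quant_method": "gptq"}, _candidates) == "gptq_marlin"
assert _pick_quant_method({"quant_method": "fp8"}, _candidates) == "fp8"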
835
+ def _verify_cuda_graph(self) -> None:
836
+ if self.max_seq_len_to_capture is None:
837
+ self.max_seq_len_to_capture = self.max_model_len
838
+ self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
839
+ self.max_model_len)
840
+ ROCM_UNSUPPORTED_MODELS = ['mllama']
841
+ if (self.hf_config.model_type in ROCM_UNSUPPORTED_MODELS
842
+ and not self.enforce_eager and current_platform.is_rocm()):
843
+ logger.warning(
844
+ "CUDA graph is not supported for %s on ROCm yet, fallback "
845
+ "to the eager mode.", self.hf_config.model_type)
846
+ self.enforce_eager = True
847
+
848
+ def _verify_bnb_config(self) -> None:
849
+ """
850
+ The current version of bitsandbytes (0.45.3) with 8-bit models does not
851
+ yet support CUDA graph.
852
+ # TODO: Remove this when bitsandbytes supports CUDA graph with 8-bit models.
853
+ """
854
+ is_bitsandbytes = self.quantization == "bitsandbytes"
855
+ has_quantization_config = (getattr(self.hf_config,
856
+ "quantization_config", None)
857
+ is not None)
858
+ is_8bit = (self.hf_config.quantization_config.get(
859
+ "load_in_8bit", False) if has_quantization_config else False)
860
+ if all([
861
+ is_bitsandbytes,
862
+ has_quantization_config,
863
+ is_8bit,
864
+ not self.enforce_eager,
865
+ ]):
866
+ logger.warning(
867
+ "CUDA graph is not supported on BitsAndBytes 8bit yet, "
868
+ "fallback to the eager mode.")
869
+
870
+ self.enforce_eager = True
871
+
872
+ def _verify_with_expert_parallelism(self) -> None:
873
+ num_expert_names = [
874
+ "moe_num_experts", # Dbrx
875
+ "num_experts", # Jamba
876
+ "n_routed_experts", # DeepSeek
877
+ "num_local_experts", # Mixtral
878
+ ]
879
+ num_experts = 0
880
+ for name in num_expert_names:
881
+ num_experts = getattr(self.hf_text_config, name, 0)
882
+ if num_experts > 0:
883
+ break
884
+ if num_experts < 1:
885
+ raise ValueError(
886
+ "Number of experts in the model must be greater than 0 "
887
+ "when expert parallelism is enabled.")
888
+
889
+ def verify_async_output_proc(self, parallel_config, speculative_config,
890
+ device_config) -> None:
891
+ if not self.use_async_output_proc:
892
+ # Nothing to check
893
+ return
894
+
895
+ if parallel_config.pipeline_parallel_size > 1:
896
+ self.use_async_output_proc = False
897
+ return
898
+
899
+ # Reminder: Please update docs/source/features/compatibility_matrix.md
900
+ # If the feature combo becomes valid
901
+ from vllm.platforms import current_platform
902
+ if not current_platform.is_async_output_supported(self.enforce_eager):
903
+ self.use_async_output_proc = False
904
+ return
905
+
906
+ if envs.VLLM_USE_RAY_SPMD_WORKER:
907
+ self.use_async_output_proc = False
908
+ return
909
+
910
+ # Async postprocessor is not necessary for pooling models
911
+ # since there is no token generation
912
+ if self.runner_type == "pooling":
913
+ self.use_async_output_proc = False
914
+
915
+ # Reminder: Please update docs/source/features/compatibility_matrix.md
916
+ # If the feature combo becomes valid
917
+ if speculative_config:
918
+ self.use_async_output_proc = False
919
+
920
+ def verify_with_parallel_config(
921
+ self,
922
+ parallel_config: "ParallelConfig",
923
+ ) -> None:
924
+
925
+ if parallel_config.distributed_executor_backend == "external_launcher":
926
+ assert self.seed is not None, (
927
+ "Seed must be set when using external launcher backend to "
928
+ "make sure sampling results are the same across workers.")
929
+
930
+ total_num_attention_heads = getattr(self.hf_text_config,
931
+ "num_attention_heads", 0)
932
+ tensor_parallel_size = parallel_config.tensor_parallel_size
933
+ if total_num_attention_heads % tensor_parallel_size != 0:
934
+ raise ValueError(
935
+ f"Total number of attention heads ({total_num_attention_heads})"
936
+ " must be divisible by tensor parallel size "
937
+ f"({tensor_parallel_size}).")
938
+
939
+ if parallel_config.enable_expert_parallel:
940
+ self._verify_with_expert_parallelism()
941
+
942
+ pipeline_parallel_size = parallel_config.pipeline_parallel_size
943
+ if pipeline_parallel_size > 1:
944
+ if not self.registry.is_pp_supported_model(self.architectures):
945
+ raise NotImplementedError(
946
+ "Pipeline parallelism is not supported for this model. "
947
+ "Supported models implement the `SupportsPP` interface.")
948
+
949
+ if self.use_async_output_proc:
950
+ self.use_async_output_proc = False
951
+
952
+ def get_hf_config_sliding_window(
953
+ self) -> Union[Optional[int], list[Optional[int]]]:
954
+ """Get the sliding window size, or None if disabled."""
955
+
956
+ # Some models, like Qwen2 and Qwen1.5, use `use_sliding_window` in
957
+ # addition to sliding window size. We check if that field is present
958
+ # and if it's False, return None.
959
+ if (hasattr(self.hf_text_config, "use_sliding_window")
960
+ and not self.hf_text_config.use_sliding_window):
961
+ return None
962
+ return getattr(self.hf_text_config, "sliding_window", None)
963
+
964
+ def get_sliding_window(self) -> Optional[Union[int, list[Optional[int]]]]:
965
+ """Get the sliding window size, or None if disabled.
966
+ """
967
+ # If user disables sliding window, return None.
968
+ if self.disable_sliding_window:
969
+ return None
970
+ # Otherwise get the value from the hf config.
971
+ return self.get_hf_config_sliding_window()
972
+
973
+ def get_vocab_size(self) -> int:
974
+ return self.hf_text_config.vocab_size
975
+
976
+ def get_hidden_size(self) -> int:
977
+ return self.hf_text_config.hidden_size
978
+
979
+ @property
980
+ def is_deepseek_mla(self) -> bool:
981
+ if not hasattr(self.hf_text_config, "model_type"):
982
+ return False
983
+ elif self.hf_text_config.model_type in \
984
+ ('deepseek_v2', 'deepseek_v3', 'deepseek_mtp'):
985
+ return self.hf_text_config.kv_lora_rank is not None
986
+ elif self.hf_text_config.model_type == 'eagle':
987
+ # if the model is an EAGLE module, check for the
988
+ # underlying architecture
989
+ return self.hf_text_config.model.model_type in \
990
+ ('deepseek_v2', 'deepseek_v3') \
991
+ and self.hf_text_config.kv_lora_rank is not None
992
+ return False
993
+
994
+ def get_head_size(self) -> int:
995
+ # TODO remove hard code
996
+ if self.is_deepseek_mla:
997
+ qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
998
+ 0)
999
+ if self.use_mla:
1000
+ return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
1001
+ else:
1002
+ qk_nope_head_dim = getattr(self.hf_text_config,
1003
+ "qk_nope_head_dim", 0)
1004
+ if qk_rope_head_dim and qk_nope_head_dim:
1005
+ return qk_rope_head_dim + qk_nope_head_dim
1006
+
1007
+ if hasattr(self.hf_text_config,
1008
+ "model_type") and (self.hf_text_config.model_type
1009
+ == "zamba2"):
1010
+ return self.hf_text_config.attention_head_dim
1011
+
1012
+ if self.is_attention_free:
1013
+ return 0
1014
+
1015
+ if hasattr(self.hf_text_config, "head_dim"):
1016
+ return self.hf_text_config.head_dim
1017
+ # FIXME(woosuk): This may not be true for all models.
1018
+ return (self.hf_text_config.hidden_size //
1019
+ self.hf_text_config.num_attention_heads)
1020
+
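# Illustrative sketch (editor's note, not part of the package diff): the
# head-size arithmetic in get_head_size, with made-up numbers resembling a
# DeepSeek-style MLA model and a plain transformer.
kv_lora_rank, qk_rope_head_dim = 512, 64
assert kv_lora_rank + qk_rope_head_dim == 576     # MLA path when use_mla is on

hidden_size, num_attention_heads = 4096, 32
assert hidden_size // num_attention_heads == 128  # fallback for standard models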
1021
+ def get_total_num_kv_heads(self) -> int:
1022
+ """Returns the total number of KV heads."""
1023
+ # For GPTBigCode & Falcon:
1024
+ # NOTE: for falcon, when new_decoder_architecture is True, the
1025
+ # multi_query flag is ignored and we use n_head_kv for the number of
1026
+ # KV heads.
1027
+ falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
1028
+ new_decoder_arch_falcon = (
1029
+ self.hf_config.model_type in falcon_model_types
1030
+ and getattr(self.hf_config, "new_decoder_architecture", False))
1031
+ if not new_decoder_arch_falcon and getattr(self.hf_text_config,
1032
+ "multi_query", False):
1033
+ # Multi-query attention, only one KV head.
1034
+ # Currently, tensor parallelism is not supported in this case.
1035
+ return 1
1036
+
1037
+ # For DBRX and MPT
1038
+ if self.hf_config.model_type == "mpt":
1039
+ if "kv_n_heads" in self.hf_config.attn_config:
1040
+ return self.hf_config.attn_config["kv_n_heads"]
1041
+ return self.hf_config.num_attention_heads
1042
+ if self.hf_config.model_type == "dbrx":
1043
+ return getattr(self.hf_config.attn_config, "kv_n_heads",
1044
+ self.hf_config.num_attention_heads)
1045
+
1046
+ if self.hf_config.model_type == "nemotron-nas":
1047
+ for block in self.hf_config.block_configs:
1048
+ if not block.attention.no_op:
1049
+ return self.hf_config.num_attention_heads \
1050
+ // block.attention.n_heads_in_group
1051
+
1052
+ raise RuntimeError("Couldn't determine number of kv heads")
1053
+
1054
+ if self.is_attention_free:
1055
+ return 0
1056
+
1057
+ attributes = [
1058
+ # For Falcon:
1059
+ "n_head_kv",
1060
+ "num_kv_heads",
1061
+ # For LLaMA-2:
1062
+ "num_key_value_heads",
1063
+ # For ChatGLM:
1064
+ "multi_query_group_num",
1065
+ ]
1066
+ for attr in attributes:
1067
+ num_kv_heads = getattr(self.hf_text_config, attr, None)
1068
+ if num_kv_heads is not None:
1069
+ return num_kv_heads
1070
+
1071
+ # For non-grouped-query attention models, the number of KV heads is
1072
+ # equal to the number of attention heads.
1073
+ return self.hf_text_config.num_attention_heads
1074
+
1075
+ def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
1076
+ """Returns the number of KV heads per GPU."""
1077
+ if self.use_mla:
1078
+ # When using MLA during decode it becomes MQA
1079
+ return 1
1080
+
1081
+ total_num_kv_heads = self.get_total_num_kv_heads()
1082
+ # If tensor parallelism is used, we divide the number of KV heads by
1083
+ # the tensor parallel size. We will replicate the KV heads in the
1084
+ # case where the number of KV heads is smaller than the tensor
1085
+ # parallel size so each GPU has at least one KV head.
1086
+ return max(1,
1087
+ total_num_kv_heads // parallel_config.tensor_parallel_size)
1088
+
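# Illustrative sketch (editor's note, not part of the package diff): per-GPU
# KV head count under tensor parallelism, mirroring the max(1, ...) above.
# The head counts and TP sizes are hypothetical.
def _kv_heads_per_gpu(total_kv_heads: int, tp_size: int) -> int:
    return max(1, total_kv_heads // tp_size)

assert _kv_heads_per_gpu(8, 4) == 2    # heads are split across GPUs
assert _kv_heads_per_gpu(8, 16) == 1   # fewer heads than GPUs -> replicate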
1089
+ def get_num_attention_heads(self,
1090
+ parallel_config: "ParallelConfig") -> int:
1091
+ num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
1092
+ return num_heads // parallel_config.tensor_parallel_size
1093
+
1094
+ def get_layers_start_end_indices(
1095
+ self, parallel_config: "ParallelConfig") -> tuple[int, int]:
1096
+ from vllm.distributed.utils import get_pp_indices
1097
+ if self.hf_text_config.model_type == "deepseek_mtp":
1098
+ total_num_hidden_layers = getattr(self.hf_text_config,
1099
+ "num_nextn_predict_layers", 0)
1100
+ else:
1101
+ total_num_hidden_layers = getattr(self.hf_text_config,
1102
+ "num_hidden_layers", 0)
1103
+ # the layout order is: DP x PP x TP
1104
+ pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
1105
+ ) % parallel_config.pipeline_parallel_size
1106
+ pp_size = parallel_config.pipeline_parallel_size
1107
+ start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
1108
+ return start, end
1109
+
1110
+ def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
1111
+ start, end = self.get_layers_start_end_indices(parallel_config)
1112
+ return end - start
1113
+
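# Illustrative sketch (editor's note, not part of the package diff): with the
# DP x PP x TP layout noted above, a worker's pipeline stage follows from its
# global rank. The parallel sizes below are hypothetical.
tp_size, pp_size = 2, 4
pp_ranks = [(rank // tp_size) % pp_size for rank in range(tp_size * pp_size)]
assert pp_ranks == [0, 0, 1, 1, 2, 2, 3, 3]   # two TP workers per pipeline stage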
1114
+ def get_num_layers_by_block_type(
1115
+ self,
1116
+ parallel_config: "ParallelConfig",
1117
+ block_type: LayerBlockType = LayerBlockType.attention,
1118
+ ) -> int:
1119
+ # This function relies on 'layers_block_type' in hf_config,
1120
+ # so models without this attribute need the workarounds below
1121
+ attn_block_type = block_type == LayerBlockType.attention
1122
+ is_transformer = not self.is_hybrid and \
1123
+ not self.has_noops and \
1124
+ not self.is_attention_free
1125
+ start, end = self.get_layers_start_end_indices(parallel_config)
1126
+
1127
+ if is_transformer:
1128
+ # Handle the basic case first
1129
+ return end - start if attn_block_type else 0
1130
+ elif self.is_attention_free:
1131
+ # Attention free
1132
+ # Note that this code assumes there
1133
+ # is only one type of attention-free block.
1134
+ return 0 if attn_block_type else end - start
1135
+ elif self.has_noops:
1136
+ block_configs = self.hf_config.block_configs
1137
+ return sum(not bc.attention.no_op
1138
+ for bc in block_configs[start:end])
1139
+ else:
1140
+ # Hybrid model Jamba
1141
+ layers_block_type_value = getattr(self.hf_config,
1142
+ "layers_block_type", None)
1143
+ if layers_block_type_value is not None:
1144
+ if hasattr(self.hf_text_config,
1145
+ "model_type") and (self.hf_text_config.model_type
1146
+ == "zamba2"):
1147
+ if attn_block_type:
1148
+ return sum(t == "hybrid"
1149
+ for t in layers_block_type_value[start:end])
1150
+ else:
1151
+ return self.get_num_layers(parallel_config)
1152
+ return sum(t == block_type.value
1153
+ for t in layers_block_type_value[start:end])
1154
+
1155
+ # Hybrid model Minimax
1156
+ attn_type_list = getattr(self.hf_config, "attn_type_list", None)
1157
+ if attn_type_list:
1158
+ return sum(t == 1 for t in attn_type_list[start:end])
1159
+
1160
+ if layers_block_type_value is None and attn_type_list is None:
1161
+ raise ValueError(
1162
+ "The model is an hybrid without a"
1163
+ "layers_block_type or an attn_type_list in the hf_config,"
1164
+ "cannot determine the num of "
1165
+ f"{block_type.value} layers")
1166
+
1167
+ return sum(t == 1 for t in attn_type_list[start:end])
1168
+
1169
+ def get_multimodal_config(self) -> "MultiModalConfig":
1170
+ """
1171
+ Get the multimodal configuration of the model.
1172
+
1173
+ Raises:
1174
+ ValueError: If the model is not multimodal.
1175
+ """
1176
+ if self.multimodal_config is None:
1177
+ raise ValueError("The model is not multimodal.")
1178
+
1179
+ return self.multimodal_config
1180
+
1181
+ def try_get_generation_config(self) -> dict[str, Any]:
1182
+ if self.generation_config in ("auto", "vllm"):
1183
+ config = try_get_generation_config(
1184
+ self.hf_config_path or self.model,
1185
+ trust_remote_code=self.trust_remote_code,
1186
+ revision=self.revision,
1187
+ )
1188
+ else:
1189
+ config = try_get_generation_config(
1190
+ self.generation_config,
1191
+ trust_remote_code=self.trust_remote_code,
1192
+ )
1193
+
1194
+ if config is None:
1195
+ return {}
1196
+
1197
+ return config.to_diff_dict()
1198
+
1199
+ def get_diff_sampling_param(self) -> dict[str, Any]:
1200
+ """
1201
+ This method returns a dictionary containing the parameters
1202
+ that differ from the default sampling parameters. If
1203
+ `generation_config` is `"vllm"`, an empty dictionary is returned.
1204
+
1205
+ Returns:
1206
+ dict[str, Any]: A dictionary with the differing sampling
1207
+ parameters; an empty dictionary if `generation_config` is `"vllm"`.
1208
+ """
1209
+ if self.generation_config == "vllm":
1210
+ config = {}
1211
+ else:
1212
+ config = self.try_get_generation_config()
1213
+
1214
+ # Overriding with given generation config
1215
+ config.update(self.override_generation_config)
1216
+
1217
+ available_params = [
1218
+ "repetition_penalty",
1219
+ "temperature",
1220
+ "top_k",
1221
+ "top_p",
1222
+ "min_p",
1223
+ "max_new_tokens",
1224
+ ]
1225
+ if any(p in config for p in available_params):
1226
+ diff_sampling_param = {
1227
+ p: config.get(p)
1228
+ for p in available_params if config.get(p) is not None
1229
+ }
1230
+ # Huggingface definition of max_new_tokens is equivalent
1231
+ # to vLLM's max_tokens
1232
+ if "max_new_tokens" in diff_sampling_param:
1233
+ diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
1234
+ "max_new_tokens")
1235
+ else:
1236
+ diff_sampling_param = {}
1237
+
1238
+ if diff_sampling_param:
1239
+ logger.warning_once(
1240
+ "Default sampling parameters have been overridden by the "
1241
+ "model's Hugging Face generation config recommended from the "
1242
+ "model creator. If this is not intended, please relaunch "
1243
+ "vLLM instance with `--generation-config vllm`.")
1244
+ return diff_sampling_param
1245
+
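# Illustrative sketch (editor's note, not part of the package diff): how a
# hypothetical HF generation config is turned into vLLM-style sampling
# overrides by get_diff_sampling_param, including the HF->vLLM rename.
hf_generation_config = {"temperature": 0.7, "max_new_tokens": 256, "do_sample": True}
available_params = ["repetition_penalty", "temperature", "top_k", "top_p",
                    "min_p", "max_new_tokens"]

diff = {p: hf_generation_config[p] for p in available_params
        if hf_generation_config.get(p) is not None}
if "max_new_tokens" in diff:
    diff["max_tokens"] = diff.pop("max_new_tokens")   # HF name -> vLLM name
assert diff == {"temperature": 0.7, "max_tokens": 256}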
1246
+ @property
1247
+ def is_encoder_decoder(self) -> bool:
1248
+ """Extract the HF encoder/decoder model flag."""
1249
+ return is_encoder_decoder(self.hf_config)
1250
+
1251
+ @property
1252
+ def uses_mrope(self) -> bool:
1253
+ return uses_mrope(self.hf_config)
1254
+
1255
+ @property
1256
+ def is_multimodal_model(self) -> bool:
1257
+ return self.multimodal_config is not None
1258
+
1259
+ @property
1260
+ def is_cross_encoder(self) -> bool:
1261
+ return self.registry.is_cross_encoder_model(self.architectures)
1262
+
1263
+ @property
1264
+ def use_mla(self) -> bool:
1265
+ return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
1266
+
1267
+ @property
1268
+ def supported_runner_types(self) -> set[RunnerType]:
1269
+ return {_TASK_RUNNER[task] for task in self.supported_tasks}
1270
+
1271
+ @property
1272
+ def runner_type(self) -> RunnerType:
1273
+ return _TASK_RUNNER[self.task]
1274
+
1275
+ @property
1276
+ def is_v1_compatible(self) -> bool:
1277
+ architectures = getattr(self.hf_config, "architectures", [])
1278
+ return ModelRegistry.is_v1_compatible(architectures)
1279
+
1280
+ @property
1281
+ def is_matryoshka(self) -> bool:
1282
+ return (hasattr(self.hf_config, "matryoshka_dimensions")
1283
+ or getattr(self.hf_config, "is_matryoshka", False))
1284
+
1285
+ @property
1286
+ def matryoshka_dimensions(self):
1287
+ return getattr(self.hf_config, "matryoshka_dimensions", None)
1288
+
1289
+
1290
+ BlockSize = Literal[1, 8, 16, 32, 64, 128]
1291
+ CacheDType = Literal["auto", "fp8", "fp8_e4m3", "fp8_e5m2"]
1292
+ PrefixCachingHashAlgo = Literal["builtin", "sha256"]
1293
+
1294
+
1295
+ @config
1296
+ @dataclass
1297
+ class CacheConfig:
1298
+ """Configuration for the KV cache."""
1299
+
1300
+ block_size: BlockSize = None # type: ignore
1301
+ """Size of a contiguous cache block in number of tokens. This is ignored on
1302
+ neuron devices and set to `--max-model-len`. On CUDA devices, only block
1303
+ sizes up to 32 are supported. On HPU devices, block size defaults to 128.
1304
+
1305
+ This config has no static default. If left unspecified by the user, it will
1306
+ be set in `Platform.check_and_update_configs()` based on the current
1307
+ platform."""
1308
+ gpu_memory_utilization: float = 0.9
1309
+ """The fraction of GPU memory to be used for the model executor, which can
1310
+ range from 0 to 1. For example, a value of 0.5 would imply 50% GPU memory
1311
+ utilization. If unspecified, will use the default value of 0.9. This is a
1312
+ per-instance limit, and only applies to the current vLLM instance. It does
1313
+ not matter if you have another vLLM instance running on the same GPU. For
1314
+ example, if you have two vLLM instances running on the same GPU, you can
1315
+ set the GPU memory utilization to 0.5 for each instance."""
1316
+ swap_space: float = 4
1317
+ """Size of the CPU swap space per GPU (in GiB)."""
1318
+ cache_dtype: CacheDType = "auto"
1319
+ """Data type for kv cache storage. If "auto", will use model data type.
1320
+ CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. ROCm (AMD GPU) supports
1321
+ fp8 (=fp8_e4m3)."""
1322
+ is_attention_free: bool = False
1323
+ """Whether the model is attention-free. This is primarily set in
1324
+ `ModelConfig` and that value should be manually duplicated here."""
1325
+ num_gpu_blocks_override: Optional[int] = None
1326
+ """Number of GPU blocks to use. This overrides the profiled `num_gpu_blocks`
1327
+ if specified. Does nothing if `None`. Used for testing preemption."""
1328
+ sliding_window: Optional[int] = None
1329
+ """Sliding window size for the KV cache. This is primarily set in
1330
+ `ModelConfig` and that value should be manually duplicated here."""
1331
+ enable_prefix_caching: Optional[bool] = None
1332
+ """Whether to enable prefix caching. Disabled by default for V0. Enabled by
1333
+ default for V1."""
1334
+ prefix_caching_hash_algo: PrefixCachingHashAlgo = "builtin"
1335
+ """Set the hash algorithm for prefix caching:\n
1336
+ - "builtin" is Python's built-in hash.\n
1337
+ - "sha256" is collision resistant but with certain overheads."""
1338
+ cpu_offload_gb: float = 0
1339
+ """The space in GiB to offload to CPU, per GPU. Default is 0, which means
1340
+ no offloading. Intuitively, this argument can be seen as a virtual way to
1341
+ increase the GPU memory size. For example, if you have one 24 GB GPU and
1342
+ set this to 10, virtually you can think of it as a 34 GB GPU. Then you can
1343
+ load a 13B model with BF16 weight, which requires at least 26GB GPU memory.
1344
+ Note that this requires fast CPU-GPU interconnect, as part of the model is
1345
+ loaded from CPU memory to GPU memory on the fly in each model forward pass.
1346
+ """
1347
+ calculate_kv_scales: bool = False
1348
+ """This enables dynamic calculation of `k_scale` and `v_scale` when
1349
+ kv_cache_dtype is fp8. If `False`, the scales will be loaded from the model
1350
+ checkpoint if available. Otherwise, the scales will default to 1.0."""
1351
+
1352
+ # Will be set after profiling.
1353
+ num_gpu_blocks: Optional[int] = field(default=None, init=False)
1354
+ """The number of blocks to allocate for GPU memory."""
1355
+ num_cpu_blocks: Optional[int] = field(default=None, init=False)
1356
+ """The number of blocks to allocate for CPU memory."""
1357
+
1358
+ def compute_hash(self) -> str:
1359
+ """
1360
+ WARNING: Whenever a new field is added to this config,
1361
+ ensure that it is included in the factors list if
1362
+ it affects the computation graph.
1363
+
1364
+ Provide a hash that uniquely identifies all the configs
1365
+ that affect the structure of the computation
1366
+ graph from input ids/embeddings to the final hidden states,
1367
+ excluding anything before input ids/embeddings and after
1368
+ the final hidden states.
1369
+ """
1370
+ factors: list[Any] = []
1371
+ factors.append(self.cache_dtype)
1372
+ # `cpu_offload_gb` does not use `torch.compile` yet.
1373
+ hash_str = hashlib.md5(str(factors).encode(),
1374
+ usedforsecurity=False).hexdigest()
1375
+ return hash_str
1376
+
1377
+ def __post_init__(self) -> None:
1378
+ self.swap_space_bytes = self.swap_space * GiB_bytes
1379
+
1380
+ self._verify_args()
1381
+ self._verify_cache_dtype()
1382
+ self._verify_prefix_caching()
1383
+
1384
+ def metrics_info(self):
1385
+ # convert cache_config to dict(key: str, value: str) for prometheus
1386
+ # metrics info
1387
+ return {key: str(value) for key, value in self.__dict__.items()}
1388
+
1389
+ def _verify_args(self) -> None:
1390
+ if self.cpu_offload_gb < 0:
1391
+ raise ValueError("CPU offload space must be non-negative"
1392
+ f", but got {self.cpu_offload_gb}")
1393
+
1394
+ if self.gpu_memory_utilization > 1.0:
1395
+ raise ValueError(
1396
+ "GPU memory utilization must be less than 1.0. Got "
1397
+ f"{self.gpu_memory_utilization}.")
1398
+
1399
+ def _verify_cache_dtype(self) -> None:
1400
+ if self.cache_dtype == "auto":
1401
+ pass
1402
+ elif self.cache_dtype in get_args(CacheDType):
1403
+ logger.info(
1404
+ "Using fp8 data type to store kv cache. It reduces the GPU "
1405
+ "memory footprint and boosts the performance. "
1406
+ "Meanwhile, it may cause accuracy drop without a proper "
1407
+ "scaling factor")
1408
+ else:
1409
+ raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")
1410
+
1411
+ def _verify_prefix_caching(self) -> None:
1412
+ if not self.enable_prefix_caching:
1413
+ return
1414
+
1415
+ if self.sliding_window is not None and not envs.VLLM_USE_V1:
1416
+ raise NotImplementedError(
1417
+ "Prefix caching is not supported with sliding window. "
1418
+ "Run with --disable-sliding-window to use prefix caching.")
1419
+
1420
+ if (self.enable_prefix_caching and self.prefix_caching_hash_algo
1421
+ not in get_args(PrefixCachingHashAlgo)):
1422
+ raise ValueError(
1423
+ "Unknown prefix caching hash algorithm: "
1424
+ f"{self.prefix_caching_hash_algo}. Must be one of "
1425
+ f"{get_args(PrefixCachingHashAlgo)}.")
1426
+
1427
+ def verify_with_parallel_config(
1428
+ self,
1429
+ parallel_config: "ParallelConfig",
1430
+ ) -> None:
1431
+ total_cpu_memory = get_cpu_memory()
1432
+ # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
1433
+ # group are in the same node. However, the GPUs may span multiple nodes.
1434
+ num_gpus_per_node = parallel_config.tensor_parallel_size
1435
+ cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node
1436
+
1437
+ msg = (f"{cpu_memory_usage / GiB_bytes:.2f} GiB out of the "
1438
+ f"{total_cpu_memory / GiB_bytes:.2f} GiB total CPU memory "
1439
+ "is allocated for the swap space.")
1440
+ if cpu_memory_usage > 0.7 * total_cpu_memory:
1441
+ raise ValueError("Too large swap space. " + msg)
1442
+ elif cpu_memory_usage > 0.4 * total_cpu_memory:
1443
+ logger.warning("Possibly too large swap space. %s", msg)
1444
+
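# Illustrative sketch (editor's note, not part of the package diff): the swap
# space check above, with made-up numbers. 4 GiB of swap per GPU at
# tensor_parallel_size=4 on a 32 GiB host sits between the 40% warning and
# 70% error thresholds.
GiB = 1 << 30
swap_space_bytes, tp_size, total_cpu_memory = 4 * GiB, 4, 32 * GiB
cpu_memory_usage = swap_space_bytes * tp_size          # 16 GiB
assert cpu_memory_usage <= 0.7 * total_cpu_memory      # not rejected outright
assert cpu_memory_usage > 0.4 * total_cpu_memory       # but triggers a warning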
1445
+
1446
+ @config
1447
+ @dataclass
1448
+ class TokenizerPoolConfig:
1449
+ """This config is deprecated and will be removed in a future release.
1450
+
1451
+ Passing these parameters will have no effect. Please remove them from your
1452
+ configurations.
1453
+ """
1454
+
1455
+ pool_size: int = 0
1456
+ """This parameter is deprecated and will be removed in a future release.
1457
+ Passing this parameter will have no effect. Please remove it from your
1458
+ configurations."""
1459
+ pool_type: str = "ray"
1460
+ """This parameter is deprecated and will be removed in a future release.
1461
+ Passing this parameter will have no effect. Please remove it from your
1462
+ configurations."""
1463
+ extra_config: dict = field(default_factory=dict)
1464
+ """This parameter is deprecated and will be removed in a future release.
1465
+ Passing this parameter will have no effect. Please remove it from your
1466
+ configurations."""
1467
+
1468
+ def __post_init__(self) -> None:
1469
+ logger.warning_once(
1470
+ "TokenizerPoolConfig is deprecated and will be removed in a "
1471
+ "future release. Passing this parameter will have no effect. "
1472
+ "Please remove it from your configurations.")
1473
+
1474
+
1475
+ class LoadFormat(str, enum.Enum):
1476
+ AUTO = "auto"
1477
+ PT = "pt"
1478
+ SAFETENSORS = "safetensors"
1479
+ NPCACHE = "npcache"
1480
+ DUMMY = "dummy"
1481
+ TENSORIZER = "tensorizer"
1482
+ SHARDED_STATE = "sharded_state"
1483
+ GGUF = "gguf"
1484
+ BITSANDBYTES = "bitsandbytes"
1485
+ MISTRAL = "mistral"
1486
+ RUNAI_STREAMER = "runai_streamer"
1487
+ RUNAI_STREAMER_SHARDED = "runai_streamer_sharded"
1488
+ FASTSAFETENSORS = "fastsafetensors"
1489
+
1490
+
1491
+ @config
1492
+ @dataclass
1493
+ class LoadConfig:
1494
+ """Configuration for loading the model weights."""
1495
+
1496
+ load_format: Union[str, LoadFormat,
1497
+ "BaseModelLoader"] = LoadFormat.AUTO.value
1498
+ """The format of the model weights to load:\n
1499
+ - "auto" will try to load the weights in the safetensors format and fall
1500
+ back to the pytorch bin format if safetensors format is not available.\n
1501
+ - "pt" will load the weights in the pytorch bin format.\n
1502
+ - "safetensors" will load the weights in the safetensors format.\n
1503
+ - "npcache" will load the weights in pytorch format and store a numpy cache
1504
+ to speed up the loading.\n
1505
+ - "dummy" will initialize the weights with random values, which is mainly
1506
+ for profiling.\n
1507
+ - "tensorizer" will use CoreWeave's tensorizer library for fast weight
1508
+ loading. See the Tensorize vLLM Model script in the Examples section for
1509
+ more information.\n
1510
+ - "runai_streamer" will load the Safetensors weights using Run:ai Model
1511
+ Streamer.\n
1512
+ - "bitsandbytes" will load the weights using bitsandbytes quantization.\n
1513
+ - "sharded_state" will load weights from pre-sharded checkpoint files,
1514
+ supporting efficient loading of tensor-parallel models.\n
1515
+ - "gguf" will load weights from GGUF format files (details specified in
1516
+ https://github.com/ggml-org/ggml/blob/master/docs/gguf.md).\n
1517
+ - "mistral" will load weights from consolidated safetensors files used by
1518
+ Mistral models."""
1519
+ download_dir: Optional[str] = None
1520
+ """Directory to download and load the weights, default to the default
1521
+ cache directory of Hugging Face."""
1522
+ model_loader_extra_config: dict = field(default_factory=dict)
1523
+ """Extra config for model loader. This will be passed to the model loader
1524
+ corresponding to the chosen load_format. This should be a JSON string that
1525
+ will be parsed into a dictionary."""
1526
+ ignore_patterns: Optional[Union[list[str], str]] = None
1527
+ """The list of patterns to ignore when loading the model. Default to
1528
+ "original/**/*" to avoid repeated loading of llama's checkpoints."""
1529
+ use_tqdm_on_load: bool = True
1530
+ """Whether to enable tqdm for showing progress bar when loading model
1531
+ weights."""
1532
+
1533
+ def compute_hash(self) -> str:
1534
+ """
1535
+ WARNING: Whenever a new field is added to this config,
1536
+ ensure that it is included in the factors list if
1537
+ it affects the computation graph.
1538
+
1539
+ Provide a hash that uniquely identifies all the configs
1540
+ that affect the structure of the computation
1541
+ graph from input ids/embeddings to the final hidden states,
1542
+ excluding anything before input ids/embeddings and after
1543
+ the final hidden states.
1544
+ """
1545
+ # no factors to consider.
1546
+ # this config will not affect the computation graph.
1547
+ factors: list[Any] = []
1548
+ hash_str = hashlib.md5(str(factors).encode(),
1549
+ usedforsecurity=False).hexdigest()
1550
+ return hash_str
1551
+
1552
+ def __post_init__(self):
1553
+ if isinstance(self.load_format, str):
1554
+ load_format = self.load_format.lower()
1555
+ self.load_format = LoadFormat(load_format)
1556
+
1557
+ if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
1558
+ logger.info(
1559
+ "Ignoring the following patterns when downloading weights: %s",
1560
+ self.ignore_patterns)
1561
+ else:
1562
+ self.ignore_patterns = ["original/**/*"]
1563
+
1564
+
1565
+ DistributedExecutorBackend = Literal["ray", "mp", "uni", "external_launcher"]
1566
+
1567
+
1568
+ @config
1569
+ @dataclass
1570
+ class ParallelConfig:
1571
+ """Configuration for the distributed execution."""
1572
+
1573
+ pipeline_parallel_size: int = 1
1574
+ """Number of pipeline parallel groups."""
1575
+ tensor_parallel_size: int = 1
1576
+ """Number of tensor parallel groups."""
1577
+ data_parallel_size: int = 1
1578
+ """Number of data parallel groups. MoE layers will be sharded according to
1579
+ the product of the tensor parallel size and data parallel size."""
1580
+ data_parallel_rank: int = 0
1581
+ """Rank of the data parallel group."""
1582
+ _data_parallel_rank_local: Optional[int] = field(default=None, init=False)
1583
+ """Private field to store the local rank of the data parallel group."""
1584
+
1585
+ @property
1586
+ def data_parallel_rank_local(self) -> int:
1587
+ """Local rank of the data parallel group, defaults to global rank."""
1588
+ if self._data_parallel_rank_local is None:
1589
+ return self.data_parallel_rank
1590
+ return self._data_parallel_rank_local
1591
+
1592
+ @data_parallel_rank_local.setter
1593
+ def data_parallel_rank_local(self, value: int) -> None:
1594
+ """Set the local rank of the data parallel group."""
1595
+ self._data_parallel_rank_local = value
1596
+
1597
+ data_parallel_master_ip: str = "127.0.0.1"
1598
+ """IP of the data parallel master."""
1599
+ data_parallel_master_port: int = 29500
1600
+ """Port of the data parallel master."""
1601
+ enable_expert_parallel: bool = False
1602
+ """Use expert parallelism instead of tensor parallelism for MoE layers."""
1603
+
1604
+ max_parallel_loading_workers: Optional[int] = None
1605
+ """Maximum number of parallal loading workers when loading model
1606
+ sequentially in multiple batches. To avoid RAM OOM when using tensor
1607
+ parallel and large models."""
1608
+
1609
+ disable_custom_all_reduce: bool = False
1610
+ """Disable the custom all-reduce kernel and fall back to NCCL."""
1611
+
1612
+ tokenizer_pool_config: Optional[TokenizerPoolConfig] = None
1613
+ """This parameter is deprecated and will be removed in a future release.
1614
+ Please remove it from your configurations."""
1615
+
1616
+ ray_workers_use_nsight: bool = False
1617
+ """Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler."""
1618
+
1619
+ placement_group: Optional["PlacementGroup"] = None
1620
+ """ray distributed model workers placement group."""
1621
+
1622
+ distributed_executor_backend: Optional[Union[DistributedExecutorBackend,
1623
+ type["ExecutorBase"]]] = None
1624
+ """Backend to use for distributed model
1625
+ workers, either "ray" or "mp" (multiprocessing). If the product
1626
+ of pipeline_parallel_size and tensor_parallel_size is less than
1627
+ or equal to the number of GPUs available, "mp" will be used to
1628
+ keep processing on a single host. Otherwise, this will default
1629
+ to "ray" if Ray is installed and fail otherwise. Note that tpu
1630
+ and hpu only support Ray for distributed inference."""
1631
+
1632
+ worker_cls: str = "auto"
1633
+ """The full name of the worker class to use. If "auto", the worker class
1634
+ will be determined based on the platform."""
1635
+ sd_worker_cls: str = "auto"
1636
+ """The full name of the worker class to use for speculative decofing.
1637
+ If "auto", the worker class will be determined based on the platform."""
1638
+ worker_extension_cls: str = ""
1639
+ """The full name of the worker extension class to use. The worker extension
1640
+ class is dynamically inherited by the worker class. This is used to inject
1641
+ new attributes and methods to the worker class for use in collective_rpc
1642
+ calls."""
1643
+
1644
+ world_size: int = field(init=False)
1645
+ """world_size is TPxPP, it affects the number of workers we create."""
1646
+ world_size_across_dp: int = field(init=False)
1647
+ """world_size_across_dp is TPxPPxDP, it is the size of the world
1648
+ including data parallelism."""
1649
+
1650
+ rank: int = 0
1651
+ """Global rank in distributed setup."""
1652
+
1653
+ def get_next_dp_init_port(self) -> int:
1654
+ """
1655
+ We might need to initialize process groups in multiple
1656
+ processes related to data parallelism,
1657
+ e.g. both in the worker and in the engine, which
1658
+ can live in different processes. To avoid port conflicts, we
1659
+ increment the port number each time we need to initialize a
1660
+ new process group related to data parallelism.
1661
+ """
1662
+ answer = self.data_parallel_master_port
1663
+ self.data_parallel_master_port += 1
1664
+ return answer
1665
+
1666
+ def stateless_init_dp_group(self) -> "ProcessGroup":
1667
+ from vllm.distributed.utils import (
1668
+ stateless_init_torch_distributed_process_group)
1669
+
1670
+ # use gloo since the engine process might not have cuda device
1671
+ dp_group = stateless_init_torch_distributed_process_group(
1672
+ self.data_parallel_master_ip,
1673
+ self.get_next_dp_init_port(),
1674
+ self.data_parallel_rank,
1675
+ self.data_parallel_size,
1676
+ backend="gloo")
1677
+
1678
+ return dp_group
1679
+
1680
+ @staticmethod
1681
+ def has_unfinished_dp(dp_group: "ProcessGroup",
1682
+ has_unfinished: bool) -> bool:
1683
+ tensor = torch.tensor([has_unfinished],
1684
+ dtype=torch.int32,
1685
+ device="cpu")
1686
+ # dp rank 0: has_unfinished_seqs=True
1687
+ # dp rank 1: has_unfinished_seqs=False
1688
+ # aggregated: has_unfinished_seqs=True
1689
+ # so this is an OR operation, i.e. MAX in integers
1690
+ torch.distributed.all_reduce(tensor, op=ReduceOp.MAX, group=dp_group)
1691
+ aggregated_has_unfinished = bool(tensor.item())
1692
+ return aggregated_has_unfinished
1693
+
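# Illustrative sketch (editor's note, not part of the package diff): the
# all_reduce above computes a boolean OR across data-parallel ranks by taking
# the integer MAX. A purely local stand-in for the collective:
per_rank_has_unfinished = [True, False, False]     # hypothetical DP ranks
aggregated = bool(max(int(x) for x in per_rank_has_unfinished))
assert aggregated == any(per_rank_has_unfinished)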
1694
+ def compute_hash(self):
1695
+ """
1696
+ Provide a hash that uniquely identifies all the configs
1697
+ that affect the structure of the computation
1698
+ graph from input ids/embeddings to the final hidden states,
1699
+ excluding anything before input ids/embeddings and after
1700
+ the final hidden states.
1701
+ """
1702
+ factors: list[Any] = []
1703
+ factors.append(self.pipeline_parallel_size)
1704
+ factors.append(self.tensor_parallel_size)
1705
+ factors.append(self.enable_expert_parallel)
1706
+ return hashlib.sha256(str(factors).encode()).hexdigest()
1707
+
1708
+ def __post_init__(self) -> None:
1709
+ self.world_size = self.pipeline_parallel_size * \
1710
+ self.tensor_parallel_size
1711
+
1712
+ if self.data_parallel_size > 1:
1713
+ # Data parallel was specified in the engine args.
1714
+ self.data_parallel_master_port = get_open_port()
1715
+ # TODO multi-node
1716
+ else:
1717
+ # Otherwise fall back to env vars (e.g. for offline SPMD case).
1718
+ self.data_parallel_size = envs.VLLM_DP_SIZE
1719
+ self.data_parallel_rank = envs.VLLM_DP_RANK
1720
+ self.data_parallel_rank_local = envs.VLLM_DP_RANK_LOCAL
1721
+ self.data_parallel_master_ip = envs.VLLM_DP_MASTER_IP
1722
+ self.data_parallel_master_port = envs.VLLM_DP_MASTER_PORT
1723
+
1724
+ self.world_size_across_dp = self.world_size * self.data_parallel_size
1725
+
1726
+ if self.distributed_executor_backend == "external_launcher":
1727
+ import os
1728
+ os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
1729
+ logger.info("Disabling V1 multiprocessing for external launcher.")
1730
+
1731
+ ray_only_devices: list[str] = []
1732
+ from vllm.platforms import current_platform
1733
+ if (current_platform.device_type in ray_only_devices
1734
+ and self.world_size > 1):
1735
+ if self.distributed_executor_backend is None:
1736
+ self.distributed_executor_backend = "ray"
1737
+ if self.distributed_executor_backend != "ray":
1738
+ raise ValueError(
1739
+ f"{current_platform.device_type.upper()} backend only "
1740
+ "supports Ray for distributed inference.")
1741
+
1742
+ if self.distributed_executor_backend is None and self.world_size > 1:
1743
+ # We use multiprocessing by default if world_size fits on the
1744
+ # current node and we aren't in a ray placement group.
1745
+
1746
+ from vllm.executor import ray_utils
1747
+ backend: DistributedExecutorBackend = "mp"
1748
+ ray_found = ray_utils.ray_is_available()
1749
+ if current_platform.is_neuron():
1750
+ # neuron uses single process to control multiple devices
1751
+ backend = "uni"
1752
+ elif (current_platform.is_cuda()
1753
+ and cuda_device_count_stateless() < self.world_size):
1754
+ if not ray_found:
1755
+ raise ValueError("Unable to load Ray which is "
1756
+ "required for multi-node inference, "
1757
+ "please install Ray with `pip install "
1758
+ "ray`.") from ray_utils.ray_import_err
1759
+ backend = "ray"
1760
+ elif ray_found:
1761
+ if self.placement_group:
1762
+ backend = "ray"
1763
+ else:
1764
+ from ray import is_initialized as ray_is_initialized
1765
+ if ray_is_initialized():
1766
+ from ray.util import get_current_placement_group
1767
+ if get_current_placement_group():
1768
+ backend = "ray"
1769
+ self.distributed_executor_backend = backend
1770
+ logger.info("Defaulting to use %s for distributed inference",
1771
+ backend)
1772
+
1773
+ if self.distributed_executor_backend is None and self.world_size == 1:
1774
+ self.distributed_executor_backend = "uni"
1775
+
1776
+ self._verify_args()
1777
+
1778
+ @property
1779
+ def use_ray(self) -> bool:
1780
+ return self.distributed_executor_backend == "ray" or (
1781
+ isinstance(self.distributed_executor_backend, type)
1782
+ and self.distributed_executor_backend.uses_ray)
1783
+
1784
+ def _verify_args(self) -> None:
1785
+ # Lazy import to avoid circular import
1786
+ from vllm.executor.executor_base import ExecutorBase
1787
+ from vllm.platforms import current_platform
1788
+ if self.distributed_executor_backend not in (
1789
+ "ray", "mp", "uni",
1790
+ "external_launcher", None) and not (isinstance(
1791
+ self.distributed_executor_backend, type) and issubclass(
1792
+ self.distributed_executor_backend, ExecutorBase)):
1793
+ raise ValueError(
1794
+ "Unrecognized distributed executor backend "
1795
+ f"{self.distributed_executor_backend}. Supported "
1796
+ "values are 'ray', 'mp' 'uni', 'external_launcher' or"
1797
+ " custom ExecutorBase subclass.")
1798
+ if self.use_ray:
1799
+ from vllm.executor import ray_utils
1800
+ ray_utils.assert_ray_available()
1801
+
1802
+ if not current_platform.use_custom_allreduce():
1803
+ self.disable_custom_all_reduce = True
1804
+ logger.info(
1805
+ "Disabled the custom all-reduce kernel because it is not "
1806
+ "supported on current platform.")
1807
+ if self.ray_workers_use_nsight and not self.use_ray:
1808
+ raise ValueError("Unable to use nsight profiling unless workers "
1809
+ "run with Ray.")
1810
+
1811
+ assert isinstance(self.worker_extension_cls, str), (
1812
+ "worker_extension_cls must be a string (qualified class name).")
1813
+
1814
+
1815
+ PreemptionMode = Literal["swap", "recompute"]
1816
+ SchedulerPolicy = Literal["fcfs", "priority"]
1817
+
1818
+
1819
+ @config
1820
+ @dataclass
1821
+ class SchedulerConfig:
1822
+ """Scheduler configuration."""
1823
+
1824
+ runner_type: RunnerType = "generate"
1825
+ """The runner type to launch for the model."""
1826
+
1827
+ max_num_batched_tokens: int = None # type: ignore
1828
+ """Maximum number of tokens to be processed in a single iteration.
1829
+
1830
+ This config has no static default. If left unspecified by the user, it will
1831
+ be set in `EngineArgs.create_engine_config` based on the usage context."""
1832
+
1833
+ max_num_seqs: int = None # type: ignore
1834
+ """Maximum number of sequences to be processed in a single iteration.
1835
+
1836
+ This config has no static default. If left unspecified by the user, it will
1837
+ be set in `EngineArgs.create_engine_config` based on the usage context."""
1838
+
1839
+ max_model_len: int = None # type: ignore
1840
+ """Maximum length of a sequence (including prompt and generated text). This
1841
+ is primarily set in `ModelConfig` and that value should be manually
1842
+ duplicated here."""
1843
+
1844
+ max_num_partial_prefills: int = 1
1845
+ """For chunked prefill, the maximum number of sequences that can be
1846
+ partially prefilled concurrently."""
1847
+
1848
+ max_long_partial_prefills: int = 1
1849
+ """For chunked prefill, the maximum number of prompts longer than
1850
+ long_prefill_token_threshold that will be prefilled concurrently. Setting
1851
+ this less than max_num_partial_prefills will allow shorter prompts to jump
1852
+ the queue in front of longer prompts in some cases, improving latency."""
1853
+
1854
+ long_prefill_token_threshold: int = 0
1855
+ """For chunked prefill, a request is considered long if the prompt is
1856
+ longer than this number of tokens."""
1857
+
1858
+ num_lookahead_slots: int = 0
1859
+ """The number of slots to allocate per sequence per
1860
+ step, beyond the known token ids. This is used in speculative
1861
+ decoding to store KV activations of tokens which may or may not be
1862
+ accepted.
1863
+
1864
+ NOTE: This will be replaced by speculative config in the future; it is
1865
+ present to enable correctness tests until then."""
1866
+
1867
+ delay_factor: float = 0.0
1868
+ """Apply a delay (of delay factor multiplied by previous
1869
+ prompt latency) before scheduling next prompt."""
1870
+
1871
+ enable_chunked_prefill: bool = None # type: ignore
1872
+ """If True, prefill requests can be chunked based
1873
+ on the remaining max_num_batched_tokens."""
1874
+
1875
+ is_multimodal_model: bool = False
1876
+ """True if the model is multimodal."""
1877
+
1878
+ # TODO (ywang96): Make this configurable.
1879
+ max_num_encoder_input_tokens: int = field(init=False)
1880
+ """Multimodal encoder compute budget, only used in V1.
1881
+
1882
+ NOTE: This is not currently configurable. It will be overridden by
1883
+ max_num_batched_tokens in case max multimodal embedding size is larger."""
1884
+
1885
+ # TODO (ywang96): Make this configurable.
1886
+ encoder_cache_size: int = field(init=False)
1887
+ """Multimodal encoder cache size, only used in V1.
1888
+
1889
+ NOTE: This is not currently configurable. It will be overridden by
1890
+ max_num_batched_tokens in case max multimodal embedding size is larger."""
1891
+
1892
+ preemption_mode: Optional[PreemptionMode] = None
1893
+ """Whether to perform preemption by swapping or
1894
+ recomputation. If not specified, we determine the mode as follows:
1895
+ We use recomputation by default since it incurs lower overhead than
1896
+ swapping. However, when the sequence group has multiple sequences
1897
+ (e.g., beam search), recomputation is not currently supported. In
1898
+ such a case, we use swapping instead."""
1899
+
1900
+ num_scheduler_steps: int = 1
1901
+ """Maximum number of forward steps per scheduler call."""
1902
+
1903
+ multi_step_stream_outputs: bool = True
1904
+ """If False, then multi-step will stream outputs at the end of all steps"""
1905
+
1906
+ send_delta_data: bool = False
1907
+ """Private API. If used, scheduler sends delta data to
1908
+ workers instead of an entire data. It should be enabled only
1909
+ when SPMD worker architecture is enabled. I.e.,
1910
+ VLLM_USE_RAY_SPMD_WORKER=1"""
1911
+
1912
+ policy: SchedulerPolicy = "fcfs"
1913
+ """The scheduling policy to use:\n
1914
+ - "fcfs" means first come first served, i.e. requests are handled in order
1915
+ of arrival.\n
1916
+ - "priority" means requests are handled based on given priority (lower
1917
+ value means earlier handling), with time of arrival deciding any ties."""
1918
+
1919
+ chunked_prefill_enabled: bool = field(init=False)
1920
+ """True if chunked prefill is enabled."""
1921
+
1922
+ disable_chunked_mm_input: bool = False
1923
+ """If set to true and chunked prefill is enabled, we do not want to
1924
+ partially schedule a multimodal item. Only used in V1
1925
+ This ensures that if a request has a mixed prompt
1926
+ (like text tokens TTTT followed by image tokens IIIIIIIIII) where only
1927
+ some image tokens can be scheduled (like TTTTIIIII, leaving IIIII),
1928
+ it will be scheduled as TTTT in one step and IIIIIIIIII in the next."""
1929
+
1930
+ scheduler_cls: Union[str, type[object]] = "vllm.core.scheduler.Scheduler"
1931
+ """The scheduler class to use. "vllm.core.scheduler.Scheduler" is the
1932
+ default scheduler. Can be a class directly or the path to a class of form
1933
+ "mod.custom_class"."""
1934
+
1935
+ def compute_hash(self) -> str:
1936
+ """
1937
+ WARNING: Whenever a new field is added to this config,
1938
+ ensure that it is included in the factors list if
1939
+ it affects the computation graph.
1940
+
1941
+ Provide a hash that uniquely identifies all the configs
1942
+ that affect the structure of the computation
1943
+ graph from input ids/embeddings to the final hidden states,
1944
+ excluding anything before input ids/embeddings and after
1945
+ the final hidden states.
1946
+ """
1947
+ # no factors to consider.
1948
+ # this config will not affect the computation graph.
1949
+ factors: list[Any] = []
1950
+ hash_str = hashlib.md5(str(factors).encode(),
1951
+ usedforsecurity=False).hexdigest()
1952
+ return hash_str
1953
+
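Every config class in this file builds its `compute_hash` the same way: gather the fields that influence the compiled graph into a `factors` list and fingerprint its string form with MD5. A minimal standalone sketch of that pattern (plain `hashlib`, no vLLM imports; the names are illustrative):

import hashlib
from typing import Any

def config_hash(factors: list[Any]) -> str:
    # Fingerprint the string form of the factor list. usedforsecurity=False
    # (Python 3.9+) marks this as a cache key, not a cryptographic hash.
    return hashlib.md5(str(factors).encode(),
                       usedforsecurity=False).hexdigest()

# Configs that differ in a graph-affecting factor hash differently,
# while an empty factor list always yields the same stable digest.
assert config_hash(["eagle3"]) != config_hash(["draft_model"])
assert config_hash([]) == config_hash([])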
1954
+ def __post_init__(self) -> None:
1955
+ if self.max_model_len is None:
1956
+ self.max_model_len = 8192
1957
+ logger.warning(
1958
+ "max_model_len was is not set. Defaulting to arbitrary value "
1959
+ "of %d.", self.max_model_len)
1960
+
1961
+ if self.max_num_seqs is None:
1962
+ self.max_num_seqs = 128
1963
+ logger.warning(
1964
+ "max_num_seqs was is not set. Defaulting to arbitrary value "
1965
+ "of %d.", self.max_num_seqs)
1966
+
1967
+ if self.max_num_batched_tokens is None:
1968
+ if self.enable_chunked_prefill:
1969
+ if self.num_scheduler_steps > 1:
1970
+ # Multi-step Chunked-Prefill doesn't allow prompt-chunking
1971
+ # for now. Set max_num_batched_tokens to max_model_len
1972
+ # so we don't reject sequences on account of a short
1973
+ # max_num_batched_tokens.
1974
+ self.max_num_batched_tokens = max(
1975
+ self.max_model_len, _DEFAULT_MAX_NUM_BATCHED_TOKENS)
1976
+ else:
1977
+ self.max_num_batched_tokens = (
1978
+ _DEFAULT_MAX_NUM_BATCHED_TOKENS)
1979
+ else:
1980
+ # If max_model_len is too short, use
1981
+ # _DEFAULT_MAX_NUM_BATCHED_TOKENS as the default value
1982
+ # for higher throughput.
1983
+ self.max_num_batched_tokens = max(
1984
+ self.max_model_len, _DEFAULT_MAX_NUM_BATCHED_TOKENS)
1985
+
1986
+ if self.runner_type == "pooling":
1987
+ # Choose specific value for higher throughput
1988
+ self.max_num_batched_tokens = max(
1989
+ self.max_num_batched_tokens,
1990
+ _POOLING_MODEL_MAX_NUM_BATCHED_TOKENS,
1991
+ )
1992
+ if self.is_multimodal_model:
1993
+ # The value needs to be at least the number of multimodal tokens
1994
+ self.max_num_batched_tokens = max(
1995
+ self.max_num_batched_tokens,
1996
+ _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
1997
+ )
1998
+
1999
+ self.max_num_encoder_input_tokens = self.max_num_batched_tokens
2000
+ self.encoder_cache_size = self.max_num_batched_tokens
2001
+
2002
+ if self.enable_chunked_prefill:
2003
+ logger.info(
2004
+ "Chunked prefill is enabled with max_num_batched_tokens=%d.",
2005
+ self.max_num_batched_tokens)
2006
+
2007
+ self.chunked_prefill_enabled = self.enable_chunked_prefill
2008
+ if self.max_num_partial_prefills > 1:
2009
+ if self.long_prefill_token_threshold == 0:
2010
+ self.long_prefill_token_threshold = int(self.max_model_len *
2011
+ 0.04)
2012
+
2013
+ logger.info(
2014
+ "Concurrent partial prefills enabled with "
2015
+ "max_num_partial_prefills=%d, max_long_partial_prefills=%d, "
2016
+ "long_prefill_token_threshold=%d",
2017
+ self.max_num_partial_prefills, self.max_long_partial_prefills,
2018
+ self.long_prefill_token_threshold)
2019
+
2020
+ self._verify_args()
2021
+
2022
+ def _verify_args(self) -> None:
2023
+ if (self.max_num_batched_tokens < self.max_model_len
2024
+ and not self.chunked_prefill_enabled):
2025
+ raise ValueError(
2026
+ f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
2027
+ f"smaller than max_model_len ({self.max_model_len}). "
2028
+ "This effectively limits the maximum sequence length to "
2029
+ "max_num_batched_tokens and makes vLLM reject longer "
2030
+ "sequences. Please increase max_num_batched_tokens or "
2031
+ "decrease max_model_len.")
2032
+
2033
+ if self.max_num_batched_tokens < self.max_num_seqs:
2034
+ raise ValueError(
2035
+ f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
2036
+ "be greater than or equal to max_num_seqs "
2037
+ f"({self.max_num_seqs}).")
2038
+
2039
+ if self.num_lookahead_slots < 0:
2040
+ raise ValueError(
2041
+ "num_lookahead_slots "
2042
+ f"({self.num_lookahead_slots}) must be greater than or "
2043
+ "equal to 0.")
2044
+
2045
+ if self.num_scheduler_steps < 1:
2046
+ raise ValueError(
2047
+ "num_scheduler_steps "
2048
+ f"({self.num_scheduler_steps}) must be greater than or "
2049
+ "equal to 1.")
2050
+
2051
+ if self.max_num_partial_prefills < 1:
2052
+ raise ValueError(
2053
+ f"max_num_partial_prefills ({self.max_num_partial_prefills}) "
2054
+ "must be greater than or equal to 1.")
2055
+ elif self.max_num_partial_prefills > 1:
2056
+ if not self.chunked_prefill_enabled:
2057
+ raise ValueError("Chunked prefill must be enabled to set "
2058
+ "max_num_partial_prefills > 1.")
2059
+
2060
+ if self.long_prefill_token_threshold > self.max_model_len:
2061
+ raise ValueError(
2062
+ "long_prefill_token_threshold "
2063
+ f"({self.long_prefill_token_threshold}) cannot be greater "
2064
+ f"than the max_model_len ({self.max_model_len}).")
2065
+
2066
+ if (self.max_long_partial_prefills
2067
+ < 1) or (self.max_long_partial_prefills
2068
+ > self.max_num_partial_prefills):
2069
+ raise ValueError(
2070
+ f"max_long_partial_prefills ({self.max_long_partial_prefills}) "
2071
+ "must be greater than or equal to 1 and less than or equal to "
2072
+ f"max_num_partial_prefills ({self.max_num_partial_prefills}).")
2073
+
2074
+ @property
2075
+ def is_multi_step(self) -> bool:
2076
+ return self.num_scheduler_steps > 1
2077
+
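The token-budget defaulting performed in `__post_init__` above reduces to a small rule in the single-step case. A minimal standalone sketch, using 2048 as an illustrative stand-in for `_DEFAULT_MAX_NUM_BATCHED_TOKENS` (defined earlier in this file):

_DEFAULT_MAX_NUM_BATCHED_TOKENS = 2048  # illustrative stand-in

def default_token_budget(max_model_len: int,
                         enable_chunked_prefill: bool) -> int:
    if enable_chunked_prefill:
        # Chunked prefill caps each scheduler step at a fixed budget.
        return _DEFAULT_MAX_NUM_BATCHED_TOKENS
    # Without chunking, a full-length prompt must fit in a single step.
    return max(max_model_len, _DEFAULT_MAX_NUM_BATCHED_TOKENS)

assert default_token_budget(8192, enable_chunked_prefill=True) == 2048
assert default_token_budget(8192, enable_chunked_prefill=False) == 8192
assert default_token_budget(1024, enable_chunked_prefill=False) == 2048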
2078
+
2079
+ Device = Literal["auto", "cuda", "neuron", "cpu", "tpu", "xpu", "hpu"]
2080
+
2081
+
2082
+ @config
2083
+ @dataclass
2084
+ class DeviceConfig:
2085
+ """Configuration for the device to use for vLLM execution."""
2086
+
2087
+ device: Union[Device, torch.device] = "auto"
2088
+ """Device type for vLLM execution."""
2089
+ device_type: str = field(init=False)
2090
+ """Device type from the current platform. This is set in
2091
+ `__post_init__`."""
2092
+
2093
+ def compute_hash(self) -> str:
2094
+ """
2095
+ WARNING: Whenever a new field is added to this config,
2096
+ ensure that it is included in the factors list if
2097
+ it affects the computation graph.
2098
+
2099
+ Provide a hash that uniquely identifies all the configs
2100
+ that affect the structure of the computation
2101
+ graph from input ids/embeddings to the final hidden states,
2102
+ excluding anything before input ids/embeddings and after
2103
+ the final hidden states.
2104
+ """
2105
+ # no factors to consider.
2106
+ # the device/platform information will be summarized
2107
+ # by torch/vllm automatically.
2108
+ factors: list[Any] = []
2109
+ hash_str = hashlib.md5(str(factors).encode(),
2110
+ usedforsecurity=False).hexdigest()
2111
+ return hash_str
2112
+
2113
+ def __post_init__(self):
2114
+ if self.device == "auto":
2115
+ # Automated device type detection
2116
+ from vllm.platforms import current_platform
2117
+ self.device_type = current_platform.device_type
2118
+ if not self.device_type:
2119
+ raise RuntimeError(
2120
+ "Failed to infer device type, please set "
2121
+ "the environment variable `VLLM_LOGGING_LEVEL=DEBUG` "
2122
+ "to turn on verbose logging to help debug the issue.")
2123
+ else:
2124
+ # Device type is assigned explicitly
2125
+ self.device_type = self.device
2126
+
2127
+ # Some device types require processing inputs on CPU
2128
+ if self.device_type in ["neuron"]:
2129
+ self.device = torch.device("cpu")
2130
+ elif self.device_type in ["tpu"]:
2131
+ self.device = None
2132
+ else:
2133
+ # Set device with device type
2134
+ self.device = torch.device(self.device_type)
2135
+
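A quick way to see what `DeviceConfig` resolves to is to construct it directly. A usage sketch, assuming `DeviceConfig` is importable from `vllm.config` and the current platform can be detected:

import torch
from vllm.config import DeviceConfig

# "auto" queries the current platform (e.g. "cpu" for this CPU build).
auto_cfg = DeviceConfig(device="auto")
print(auto_cfg.device_type, auto_cfg.device)

# An explicit device type skips detection and maps straight to torch.device.
cpu_cfg = DeviceConfig(device="cpu")
assert cpu_cfg.device == torch.device("cpu")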
2136
+
2137
+ SpeculativeMethod = Literal["ngram", "eagle", "medusa", "mlp_speculator",
2138
+ "draft_model"]
2139
+ SpeculativeAcceptanceMethod = Literal["rejection_sampler",
2140
+ "typical_acceptance_sampler"]
2141
+
2142
+
2143
+ @config
2144
+ @dataclass
2145
+ class SpeculativeConfig:
2146
+ """Configuration for speculative decoding."""
2147
+
2148
+ # General speculative decoding control
2149
+ num_speculative_tokens: int = field(default=None,
2150
+ init=True) # type: ignore
2151
+ """The number of speculative tokens, if provided. It will default to the
2152
+ number in the draft model config if present, otherwise, it is required."""
2153
+ model: Optional[str] = None
2154
+ """The name of the draft model, eagle head, or additional weights, if
2155
+ provided."""
2156
+ method: Optional[SpeculativeMethod] = None
2157
+ """The name of the speculative method to use. If users provide and set the
2158
+ `model` param, the speculative method type will be detected automatically
2159
+ if possible; if the `model` param is not provided, the method name must be
2160
+ provided.
2161
+
2162
+ If using `ngram` method, the related configuration `prompt_lookup_max` and
2163
+ `prompt_lookup_min` should be considered."""
2164
+ acceptance_method: SpeculativeAcceptanceMethod = "rejection_sampler"
2165
+ """The method to use for accepting draft tokens:\n
2166
+ - "rejection_sampler" maps to `RejectionSampler`.\n
2167
+ - "typical_acceptance_sampler" maps to `TypicalAcceptanceSampler`.
2168
+
2169
+ If using `typical_acceptance_sampler`, the related configuration
2170
+ `posterior_threshold` and `posterior_alpha` should be considered."""
2171
+ draft_tensor_parallel_size: Optional[int] = None
2172
+ """The degree of the tensor parallelism for the draft model. Can only be 1
2173
+ or the same as the target model's tensor parallel size."""
2174
+ disable_logprobs: bool = True
2175
+ """If set to True, token log probabilities are not returned during
2176
+ speculative decoding. If set to False, token log probabilities are returned
2177
+ according to the log probability settings in SamplingParams."""
2178
+
2179
+ # Draft model configuration
2180
+ quantization: Optional[str] = None
2181
+ """Quantization method that was used to quantize the draft model weights.
2182
+ If `None`, we assume the model weights are not quantized. Note that it only
2183
+ takes effect when using the draft model-based speculative method."""
2184
+ max_model_len: Optional[int] = None
2185
+ """The maximum model length of the draft model. Used when testing the
2186
+ ability to skip speculation for some sequences."""
2187
+ revision: Optional[str] = None
2188
+ """The specific model version to use for the draft model. It can be a
2189
+ branch name, a tag name, or a commit id. If unspecified, will use the
2190
+ default version."""
2191
+ code_revision: Optional[str] = None
2192
+ """The specific revision to use for the draft model code on Hugging Face
2193
+ Hub. It can be a branch name, a tag name, or a commit id. If unspecified,
2194
+ will use the default version."""
2195
+
2196
+ # Advanced control
2197
+ disable_mqa_scorer: bool = False
2198
+ """Disable the MQA scorer and fall back to batch expansion for scoring
2199
+ proposals."""
2200
+ disable_by_batch_size: Optional[int] = None
2201
+ """Disable speculative decoding for new incoming requests when the number
2202
+ of enqueued requests is larger than this value, if provided."""
2203
+
2204
+ # Ngram proposer configuration
2205
+ prompt_lookup_max: Optional[int] = None
2206
+ """Maximum size of ngram token window when using Ngram proposer, required
2207
+ when method is set to ngram."""
2208
+ prompt_lookup_min: Optional[int] = None
2209
+ """Minimum size of ngram token window when using Ngram proposer, if
2210
+ provided. Defaults to 1."""
2211
+
2212
+ # Typical acceptance sampler configuration
2213
+ posterior_threshold: Optional[float] = None
2214
+ """A threshold value that sets a lower bound on the posterior probability
2215
+ of a token in the target model for it to be accepted. This threshold is
2216
+ used only when we use the `TypicalAcceptanceSampler` for token acceptance.
2217
+ """
2218
+ posterior_alpha: Optional[float] = None
2219
+ """Scaling factor for entropy-based threshold, applied when using
2220
+ `TypicalAcceptanceSampler`."""
2221
+
2222
+ # required configuration params passed from engine
2223
+ target_model_config: ModelConfig = field(default=None,
2224
+ init=True) # type: ignore
2225
+ """The configuration of the target model."""
2226
+ target_parallel_config: ParallelConfig = field(default=None,
2227
+ init=True) # type: ignore
2228
+ """The parallel configuration for the target model."""
2229
+ enable_chunked_prefill: bool = field(default=None,
2230
+ init=True) # type: ignore
2231
+ """Whether vLLM is configured to use chunked prefill or not. Used for
2232
+ raising an error since it's not yet compatible with speculative decode."""
2233
+ disable_log_stats: bool = field(default=None, init=True) # type: ignore
2234
+ """Whether to disable the periodic printing of stage times in speculative
2235
+ decoding."""
2236
+
2237
+ # params generated in the post-init stage
2238
+ draft_model_config: ModelConfig = field(default=None,
2239
+ init=True) # type: ignore
2240
+ """The configuration of the draft model initialized internal."""
2241
+ draft_parallel_config: ParallelConfig = field(default=None,
2242
+ init=True) # type: ignore
2243
+ """The parallel configuration for the draft model initialized internal."""
2244
+
2245
+ def compute_hash(self) -> str:
2246
+ """
2247
+ WARNING: Whenever a new field is added to this config,
2248
+ ensure that it is included in the factors list if
2249
+ it affects the computation graph.
2250
+
2251
+ Provide a hash that uniquely identifies all the configs
2252
+ that affect the structure of the computation
2253
+ graph from input ids/embeddings to the final hidden states,
2254
+ excluding anything before input ids/embeddings and after
2255
+ the final hidden states.
2256
+ """
2257
+ factors: list[Any] = []
2258
+ # Eagle3 affects the computation graph because it returns intermediate
2259
+ # hidden states in addition to the final hidden state.
2260
+ factors.append(self.method == "eagle3")
2261
+ hash_str = hashlib.md5(str(factors).encode(),
2262
+ usedforsecurity=False).hexdigest()
2263
+ return hash_str
2264
+
2265
+ @classmethod
2266
+ def from_dict(cls, dict_value: dict) -> "SpeculativeConfig":
2267
+ """Parse the CLI value for the speculative config."""
2268
+ return cls(**dict_value)
2269
+
2270
+ @staticmethod
2271
+ def hf_config_override(hf_config: PretrainedConfig) -> PretrainedConfig:
2272
+ if hf_config.model_type == "deepseek_v3":
2273
+ hf_config.model_type = "deepseek_mtp"
2274
+ if hf_config.model_type == "deepseek_mtp":
2275
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2276
+ hf_config.update({
2277
+ "n_predict": n_predict,
2278
+ "architectures": ["DeepSeekMTPModel"]
2279
+ })
2280
+ return hf_config
2281
+
2282
+ def __post_init__(self):
2283
+
2284
+ # Note: "method" is a new parameter that helps to extend the
2285
+ # configuration of non-model-based proposers, and the "model" parameter
2286
+ # will be used to set the draft model, eagle head, or additional weight
2287
+ # when needed. If users do not specify "method", the speculative method
2288
+ # will be detected automatically if possible. If the speculative method
2289
+ # can not be detected, it will be considered as the "draft_model" by
2290
+ # default.
2291
+
2292
+ if self.model is None and self.num_speculative_tokens is not None:
2293
+ # TODO(Shangming): Refactor mtp configuration logic when supporting
2294
+ # mtp acceleration for more models besides deepseek_v3
2295
+ if self.target_model_config and \
2296
+ self.target_model_config.hf_text_config.model_type \
2297
+ == "deepseek_v3":
2298
+ # use the draft model from the same model:
2299
+ self.model = self.target_model_config.model
2300
+ elif self.method in ("ngram", "[ngram]"):
2301
+ self.model = "ngram"
2302
+ else:
2303
+ raise ValueError("num_speculative_tokens was provided without "
2304
+ "speculative model.")
2305
+
2306
+ # Automatically configure the method for ngram when "model" is used
2307
+ # instead of "method"
2308
+ if self.method is None and (self.model is not None
2309
+ and self.model in ("ngram", "[ngram]")):
2310
+ self.method = "ngram"
2311
+
2312
+ if self.method in ("ngram", "[ngram]"):
2313
+ # Unified to "ngram" internally
2314
+ self.method = "ngram"
2315
+ # Set default values if not provided
2316
+ if (self.prompt_lookup_min is None
2317
+ and self.prompt_lookup_max is None):
2318
+ # TODO(woosuk): Tune these values. They are arbitrarily chosen.
2319
+ self.prompt_lookup_min = 5
2320
+ self.prompt_lookup_max = 5
2321
+ elif self.prompt_lookup_min is None:
2322
+ assert self.prompt_lookup_max is not None
2323
+ self.prompt_lookup_min = self.prompt_lookup_max
2324
+ elif self.prompt_lookup_max is None:
2325
+ assert self.prompt_lookup_min is not None
2326
+ self.prompt_lookup_max = self.prompt_lookup_min
2327
+
2328
+ # Validate values
2329
+ if self.prompt_lookup_min < 1:
2330
+ raise ValueError(
2331
+ f"prompt_lookup_min={self.prompt_lookup_min} must be > 0")
2332
+ if self.prompt_lookup_max < 1:
2333
+ raise ValueError(
2334
+ f"prompt_lookup_max={self.prompt_lookup_max} must be > 0")
2335
+ if self.prompt_lookup_min > self.prompt_lookup_max:
2336
+ raise ValueError(
2337
+ f"prompt_lookup_min={self.prompt_lookup_min} must "
2338
+ f"be <= prompt_lookup_max={self.prompt_lookup_max}")
2339
+
2340
+ # TODO: currently we still need to extract vocab_size from the target
2341
+ # model config; in the future, we may refactor it out and set the
2342
+ # draft-related config to None here.
2343
+ self.draft_model_config = self.target_model_config
2344
+ self.draft_parallel_config = self.target_parallel_config
2345
+ else:
2346
+ self.prompt_lookup_max = 0
2347
+ self.prompt_lookup_min = 0
2348
+
2349
+ if self.model is not None:
2350
+ self.draft_model_config = ModelConfig(
2351
+ model=self.model,
2352
+ task="draft",
2353
+ tokenizer=self.target_model_config.tokenizer,
2354
+ tokenizer_mode=self.target_model_config.tokenizer_mode,
2355
+ trust_remote_code=self.target_model_config.
2356
+ trust_remote_code,
2357
+ allowed_local_media_path=self.target_model_config.
2358
+ allowed_local_media_path,
2359
+ dtype=self.target_model_config.dtype,
2360
+ seed=self.target_model_config.seed,
2361
+ revision=self.revision,
2362
+ code_revision=self.code_revision,
2363
+ tokenizer_revision=self.target_model_config.
2364
+ tokenizer_revision,
2365
+ max_model_len=None,
2366
+ spec_target_max_model_len=self.target_model_config.
2367
+ max_model_len,
2368
+ quantization=self.quantization,
2369
+ enforce_eager=self.target_model_config.enforce_eager,
2370
+ max_seq_len_to_capture=self.target_model_config.
2371
+ max_seq_len_to_capture,
2372
+ max_logprobs=self.target_model_config.max_logprobs,
2373
+ hf_overrides=SpeculativeConfig.hf_config_override,
2374
+ )
2375
+
2376
+ # Automatically detect the method
2377
+ if self.method in ('eagle', 'eagle3'):
2378
+ pass
2379
+ elif "eagle-" in self.draft_model_config.model.lower() or \
2380
+ "eagle3-" in self.draft_model_config.model.lower():
2381
+ self.method = "eagle"
2382
+ elif self.draft_model_config.hf_config.model_type == "medusa":
2383
+ self.method = "medusa"
2384
+ elif (self.draft_model_config.hf_config.model_type ==
2385
+ "mlp_speculator"):
2386
+ self.method = "mlp_speculator"
2387
+ else:
2388
+ self.method = "draft_model"
2389
+
2390
+ # Replace hf_config for EAGLE draft_model
2391
+ if self.method in ("eagle", "eagle3"):
2392
+ if self.enable_chunked_prefill and not envs.VLLM_USE_V1:
2393
+ raise ValueError(
2394
+ "Chunked prefill and EAGLE are not compatible "
2395
+ "when using V0.")
2396
+
2397
+ from vllm.transformers_utils.configs.eagle import (
2398
+ EAGLEConfig)
2399
+ if isinstance(self.draft_model_config.hf_config,
2400
+ EAGLEConfig):
2401
+ pass
2402
+ else:
2403
+ eagle_config = EAGLEConfig(
2404
+ self.draft_model_config.hf_config)
2405
+ self.draft_model_config.hf_config = eagle_config
2406
+
2407
+ if (self.num_speculative_tokens is not None
2408
+ and hasattr(self.draft_model_config.hf_config,
2409
+ "num_lookahead_tokens")):
2410
+ self.draft_model_config.hf_config.num_lookahead_tokens = \
2411
+ self.num_speculative_tokens
2412
+
2413
+ n_predict = getattr(self.draft_model_config.hf_config,
2414
+ "n_predict", None)
2415
+ if n_predict is not None:
2416
+ if self.num_speculative_tokens is None:
2417
+ # Default to max value defined in draft model config.
2418
+ self.num_speculative_tokens = n_predict
2419
+ elif self.num_speculative_tokens > n_predict and \
2420
+ self.num_speculative_tokens % n_predict != 0:
2421
+ # Ensure divisibility for MTP module reuse.
2422
+ raise ValueError(
2423
+ f"num_speculative_tokens:{self.num_speculative_tokens}"
2424
+ f" must be divisible by {n_predict=}")
2425
+
2426
+ self.draft_tensor_parallel_size = \
2427
+ SpeculativeConfig._verify_and_get_draft_tp(
2428
+ self.target_parallel_config,
2429
+ self.draft_tensor_parallel_size,
2430
+ self.draft_model_config.hf_config
2431
+ )
2432
+
2433
+ self.draft_model_config.max_model_len = (
2434
+ SpeculativeConfig._maybe_override_draft_max_model_len(
2435
+ self.max_model_len,
2436
+ self.draft_model_config.max_model_len,
2437
+ self.target_model_config.max_model_len,
2438
+ ))
2439
+
2440
+ self.draft_parallel_config = (
2441
+ SpeculativeConfig.create_draft_parallel_config(
2442
+ self.target_parallel_config,
2443
+ self.draft_tensor_parallel_size))
2444
+
2445
+ if self.acceptance_method == "typical_acceptance_sampler":
2446
+ if self.posterior_threshold is None:
2447
+ self.posterior_threshold = 0.09
2448
+ if self.posterior_alpha is None:
2449
+ self.posterior_alpha = 0.3
2450
+
2451
+ self._verify_args()
2452
+
2453
+ @staticmethod
2454
+ def _maybe_override_draft_max_model_len(
2455
+ speculative_max_model_len: Optional[int],
2456
+ draft_max_model_len: int,
2457
+ target_max_model_len: int,
2458
+ ) -> int:
2459
+ """Determine the max sequence len for the draft model. This is usually
2460
+ the draft_max_model_len, but may be the target_max_model_len if it is
2461
+ less than the draft_max_model_len, or may be speculative_max_model_len
2462
+ if it is specified.
2463
+
2464
+ This is necessary so that sequences do not exceed the capacity of the
2465
+ draft model or the target model.
2466
+
2467
+ speculative_max_model_len is mainly used for testing that sequences can
2468
+ skip speculation.
2469
+ """
2470
+
2471
+ if speculative_max_model_len is not None:
2472
+
2473
+ if speculative_max_model_len > draft_max_model_len:
2474
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2475
+ f"larger than {draft_max_model_len=}")
2476
+
2477
+ if speculative_max_model_len > target_max_model_len:
2478
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2479
+ f"larger than {target_max_model_len=}")
2480
+
2481
+ return speculative_max_model_len
2482
+
2483
+ return min(
2484
+ draft_max_model_len,
2485
+ target_max_model_len,
2486
+ )
2487
+
2488
+ @staticmethod
2489
+ def _verify_and_get_draft_tp(
2490
+ target_parallel_config: ParallelConfig,
2491
+ speculative_draft_tensor_parallel_size: Optional[int],
2492
+ draft_hf_config: PretrainedConfig) -> int:
2493
+ """
2494
+ Verifies and adjusts the tensor parallel size for a draft model
2495
+ specified using speculative_draft_tensor_parallel_size.
2496
+ """
2497
+ # If speculative_draft_tensor_parallel_size is unset then set it
2498
+ # appropriately else verify that it is set correctly.
2499
+ if speculative_draft_tensor_parallel_size is None:
2500
+ if draft_hf_config.model_type == "mlp_speculator":
2501
+ speculative_draft_tensor_parallel_size = 1
2502
+ if target_parallel_config.tensor_parallel_size > 1:
2503
+ logger.warning(
2504
+ "%s cannot currently be run with tp>1; "
2505
+ "setting speculative_draft_tensor_parallel_size=1",
2506
+ draft_hf_config.model_type)
2507
+ else:
2508
+ speculative_draft_tensor_parallel_size = \
2509
+ target_parallel_config.tensor_parallel_size
2510
+ elif speculative_draft_tensor_parallel_size not in (
2511
+ 1, target_parallel_config.tensor_parallel_size):
2512
+ raise ValueError(
2513
+ f"{speculative_draft_tensor_parallel_size=} cannot be "
2514
+ f"other value than 1 or target model tensor_parallel_size")
2515
+ return speculative_draft_tensor_parallel_size
2516
+
2517
+ @staticmethod
2518
+ def create_draft_parallel_config(
2519
+ target_parallel_config: ParallelConfig,
2520
+ speculative_draft_tensor_parallel_size: int,
2521
+ ) -> ParallelConfig:
2522
+ """Create a parallel config for use by the draft worker.
2523
+
2524
+ This is mostly a copy of the target parallel config, except the tp_size.
2525
+ """
2526
+ draft_parallel_config = ParallelConfig(
2527
+ pipeline_parallel_size=target_parallel_config.
2528
+ pipeline_parallel_size,
2529
+ tensor_parallel_size=speculative_draft_tensor_parallel_size,
2530
+ distributed_executor_backend=target_parallel_config.
2531
+ distributed_executor_backend,
2532
+ max_parallel_loading_workers=target_parallel_config.
2533
+ max_parallel_loading_workers,
2534
+ disable_custom_all_reduce=target_parallel_config.
2535
+ disable_custom_all_reduce,
2536
+ ray_workers_use_nsight=target_parallel_config.
2537
+ ray_workers_use_nsight,
2538
+ placement_group=target_parallel_config.placement_group,
2539
+ )
2540
+
2541
+ return draft_parallel_config
2542
+
2543
+ def _verify_args(self) -> None:
2544
+ if self.num_speculative_tokens is None:
2545
+ raise ValueError(
2546
+ "num_speculative_tokens must be provided with "
2547
+ "speculative model unless the draft model config contains an "
2548
+ "n_predict parameter.")
2549
+
2550
+ if self.num_speculative_tokens <= 0:
2551
+ raise ValueError("Expected num_speculative_tokens to be greater "
2552
+ f"than zero ({self.num_speculative_tokens}).")
2553
+
2554
+ if self.draft_model_config:
2555
+ self.draft_model_config.verify_with_parallel_config(
2556
+ self.draft_parallel_config)
2557
+ # Validate and set draft token acceptance related settings.
2558
+
2559
+ if self.acceptance_method is None:
2560
+ raise ValueError("acceptance_method is not set. "
2561
+ "Expected values are rejection_sampler or "
2562
+ "typical_acceptance_sampler.")
2563
+
2564
+ if (self.acceptance_method != 'rejection_sampler'
2565
+ and self.acceptance_method != 'typical_acceptance_sampler'):
2566
+ raise ValueError(
2567
+ "Expected acceptance_method to be either "
2568
+ "rejection_sampler or typical_acceptance_sampler. Instead it "
2569
+ f"is {self.acceptance_method}")
2570
+
2571
+ if self.acceptance_method == "typical_acceptance_sampler" and (
2572
+ (self.posterior_threshold is not None
2573
+ and self.posterior_threshold < 0) or
2574
+ (self.posterior_alpha is not None and self.posterior_alpha < 0)):
2575
+ raise ValueError(
2576
+ "Expected the posterior_threshold and posterior_alpha of "
2577
+ "typical_acceptance_sampler to be > 0. "
2578
+ "Instead found posterior_threshold = "
2579
+ f"{self.posterior_threshold} and posterior_alpha = "
2580
+ f"{self.posterior_alpha}")
2581
+
2582
+ if (self.disable_by_batch_size is not None
2583
+ and self.disable_by_batch_size < 2):
2584
+ raise ValueError("Expect the batch size threshold of disabling "
2585
+ "speculative decoding is > 1, but got "
2586
+ f"{self.disable_by_batch_size=}")
2587
+
2588
+ if self.method == "eagle3" and self.target_model_config and \
2589
+ "llama" not in self.target_model_config.hf_text_config.model_type:
2590
+ raise ValueError(
2591
+ "Eagle3 is only supported for Llama models. "
2592
+ f"Got {self.target_model_config.hf_text_config.model_type=}")
2593
+
2594
+ @property
2595
+ def num_lookahead_slots(self) -> int:
2596
+ """The number of additional slots the scheduler should allocate per
2597
+ step, in addition to the slots allocated for each known token.
2598
+
2599
+ This is equal to the number of speculative tokens, as each speculative
2600
+ token must be scored.
2601
+ """
2602
+ return self.num_speculative_tokens
2603
+
2604
+ def use_eagle(self) -> bool:
2605
+ return self.method in ("eagle", "eagle3")
2606
+
2607
+ def __repr__(self) -> str:
2608
+ method = self.method
2609
+ model = None if method == "ngram" else self.draft_model_config.model
2610
+ num_spec_tokens = self.num_speculative_tokens
2611
+ return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"
2612
+
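The ngram branch of `__post_init__` above fills in the prompt-lookup window when only one bound is supplied. A standalone sketch of that defaulting rule (plain Python; the fallback of 5 mirrors the arbitrary default noted in the code):

from typing import Optional

def resolve_ngram_window(lookup_min: Optional[int],
                         lookup_max: Optional[int]) -> tuple[int, int]:
    if lookup_min is None and lookup_max is None:
        lookup_min = lookup_max = 5      # arbitrary default, as above
    elif lookup_min is None:
        lookup_min = lookup_max          # collapse to a single window size
    elif lookup_max is None:
        lookup_max = lookup_min
    if not 1 <= lookup_min <= lookup_max:
        raise ValueError(f"invalid ngram window: {lookup_min=}, {lookup_max=}")
    return lookup_min, lookup_max

assert resolve_ngram_window(None, None) == (5, 5)
assert resolve_ngram_window(2, None) == (2, 2)
assert resolve_ngram_window(2, 4) == (2, 4)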
2613
+
2614
+ LoRADType = Literal["auto", "float16", "bfloat16"]
2615
+
2616
+
2617
+ @config
2618
+ @dataclass
2619
+ class LoRAConfig:
2620
+ """Configuration for LoRA."""
2621
+
2622
+ max_lora_rank: int = 16
2623
+ """Max LoRA rank."""
2624
+ max_loras: int = 1
2625
+ """Max number of LoRAs in a single batch."""
2626
+ fully_sharded_loras: bool = False
2627
+ """By default, only half of the LoRA computation is sharded with tensor
2628
+ parallelism. Enabling this will use the fully sharded layers. At high
2629
+ sequence length, max rank or tensor parallel size, this is likely faster.
2630
+ """
2631
+ max_cpu_loras: Optional[int] = None
2632
+ """Maximum number of LoRAs to store in CPU memory. Must be >= than
2633
+ `max_loras`."""
2634
+ lora_dtype: Union[torch.dtype, LoRADType] = "auto"
2635
+ """Data type for LoRA. If auto, will default to base model dtype."""
2636
+ lora_extra_vocab_size: int = 256
2637
+ """Maximum size of extra vocabulary that can be present in a LoRA adapter
2638
+ (added to the base model vocabulary)."""
2639
+ # This is a constant.
2640
+ lora_vocab_padding_size: ClassVar[int] = 256
2641
+ long_lora_scaling_factors: Optional[tuple[float, ...]] = None
2642
+ """Specify multiple scaling factors (which can be different from base model
2643
+ scaling factor - see e.g. Long LoRA) to allow for multiple LoRA adapters
2644
+ trained with those scaling factors to be used at the same time. If not
2645
+ specified, only adapters trained with the base model scaling factor are
2646
+ allowed."""
2647
+ bias_enabled: bool = False
2648
+ """Enable bias for LoRA adapters."""
2649
+
2650
+ def compute_hash(self) -> str:
2651
+ """
2652
+ WARNING: Whenever a new field is added to this config,
2653
+ ensure that it is included in the factors list if
2654
+ it affects the computation graph.
2655
+
2656
+ Provide a hash that uniquely identifies all the configs
2657
+ that affect the structure of the computation
2658
+ graph from input ids/embeddings to the final hidden states,
2659
+ excluding anything before input ids/embeddings and after
2660
+ the final hidden states.
2661
+ """
2662
+ factors: list[Any] = []
2663
+ factors.append(self.max_lora_rank)
2664
+ factors.append(self.max_loras)
2665
+ factors.append(self.fully_sharded_loras)
2666
+ factors.append(self.lora_dtype)
2667
+ factors.append(self.lora_extra_vocab_size)
2668
+ factors.append(self.long_lora_scaling_factors)
2669
+ factors.append(self.bias_enabled)
2670
+ hash_str = hashlib.md5(str(factors).encode(),
2671
+ usedforsecurity=False).hexdigest()
2672
+ return hash_str
2673
+
2674
+ def __post_init__(self):
2675
+ # Setting the maximum rank to 512 should be able to satisfy the vast
2676
+ # majority of applications.
2677
+ possible_max_ranks = (8, 16, 32, 64, 128, 256, 320, 512)
2678
+ possible_lora_extra_vocab_size = (256, 512)
2679
+ if self.max_lora_rank not in possible_max_ranks:
2680
+ raise ValueError(
2681
+ f"max_lora_rank ({self.max_lora_rank}) must be one of "
2682
+ f"{possible_max_ranks}.")
2683
+ if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
2684
+ raise ValueError(
2685
+ f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
2686
+ f"must be one of {possible_lora_extra_vocab_size}.")
2687
+ if self.max_loras < 1:
2688
+ raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
2689
+ if self.max_cpu_loras is None:
2690
+ self.max_cpu_loras = self.max_loras
2691
+ elif self.max_cpu_loras < self.max_loras:
2692
+ raise ValueError(
2693
+ f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
2694
+ f"max_loras ({self.max_loras})")
2695
+
2696
+ def verify_with_cache_config(self, cache_config: CacheConfig):
2697
+ if cache_config.cpu_offload_gb > 0 and not envs.VLLM_USE_V1:
2698
+ raise ValueError(
2699
+ "V0 LoRA does not support CPU offload, please use V1.")
2700
+
2701
+ def verify_with_model_config(self, model_config: ModelConfig):
2702
+ if self.lora_dtype in (None, "auto"):
2703
+ self.lora_dtype = model_config.dtype
2704
+ elif isinstance(self.lora_dtype, str):
2705
+ self.lora_dtype = getattr(torch, self.lora_dtype)
2706
+
2707
+ def verify_lora_support(self):
2708
+ if self.long_lora_scaling_factors is not None and envs.VLLM_USE_V1:
2709
+ raise ValueError(
2710
+ "V1 LoRA does not support long LoRA, please use V0.")
2711
+
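Construction alone is enough to exercise the validation in `__post_init__`. A usage sketch, assuming `LoRAConfig` is importable from `vllm.config`:

from vllm.config import LoRAConfig

# max_cpu_loras falls back to max_loras when left unset.
cfg = LoRAConfig(max_lora_rank=32, max_loras=4)
assert cfg.max_cpu_loras == 4

# Ranks outside the supported set are rejected up front.
try:
    LoRAConfig(max_lora_rank=24)
except ValueError as e:
    print(e)  # max_lora_rank (24) must be one of (8, 16, 32, ...)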
2712
+
2713
+ @config
2714
+ @dataclass
2715
+ class PromptAdapterConfig:
2716
+ """Configuration for PromptAdapters."""
2717
+
2718
+ max_prompt_adapters: int = 1
2719
+ """Max number of PromptAdapters in a batch."""
2720
+ max_prompt_adapter_token: int = 0
2721
+ """Max number of PromptAdapters tokens."""
2722
+ max_cpu_prompt_adapters: Optional[int] = None
2723
+ """Maximum number of PromptAdapters to store in CPU memory. Must be >= than
2724
+ `max_prompt_adapters`."""
2725
+ prompt_adapter_dtype: Union[torch.dtype, str] = "auto"
2726
+ """Data type for PromptAdapter. If auto, will default to base model dtype.
2727
+ """
2728
+
2729
+ def compute_hash(self) -> str:
2730
+ """
2731
+ WARNING: Whenever a new field is added to this config,
2732
+ ensure that it is included in the factors list if
2733
+ it affects the computation graph.
2734
+
2735
+ Provide a hash that uniquely identifies all the configs
2736
+ that affect the structure of the computation
2737
+ graph from input ids/embeddings to the final hidden states,
2738
+ excluding anything before input ids/embeddings and after
2739
+ the final hidden states.
2740
+ """
2741
+ # no factors to consider.
2742
+ # this config will not affect the computation graph.
2743
+ factors: list[Any] = []
2744
+ hash_str = hashlib.md5(str(factors).encode(),
2745
+ usedforsecurity=False).hexdigest()
2746
+ return hash_str
2747
+
2748
+ def __post_init__(self):
2749
+
2750
+ if self.max_prompt_adapters < 1:
2751
+ raise ValueError(f"max_prompt_adapters "
2752
+ f"({self.max_prompt_adapters}) must be >= 1.")
2753
+ if self.max_prompt_adapter_token == 0:
2754
+ raise ValueError("max_prompt_adapter_token must be set.")
2755
+ if self.max_cpu_prompt_adapters is None:
2756
+ self.max_cpu_prompt_adapters = self.max_prompt_adapters
2757
+
2758
+ def verify_with_model_config(self, model_config: ModelConfig):
2759
+ if self.prompt_adapter_dtype == "auto":
2760
+ self.prompt_adapter_dtype = model_config.dtype
2761
+ elif isinstance(self.prompt_adapter_dtype, str):
2762
+ self.prompt_adapter_dtype = getattr(torch,
2763
+ self.prompt_adapter_dtype)
2764
+
2765
+
2766
+ @config
2767
+ @dataclass
2768
+ class MultiModalConfig:
2769
+ """Controls the behavior of multimodal models."""
2770
+
2771
+ limit_per_prompt: dict[str, int] = field(default_factory=dict)
2772
+ """
2773
+ The maximum number of input items allowed per prompt for each modality.
2774
+ This should be a JSON string that will be parsed into a dictionary.
2775
+ Defaults to 1 (V0) or 999 (V1) for each modality.
2776
+
2777
+ For example, to allow up to 16 images and 2 videos per prompt:
2778
+ ``{"images": 16, "videos": 2}``
2779
+ """
2780
+
2781
+ def compute_hash(self) -> str:
2782
+ """
2783
+ WARNING: Whenever a new field is added to this config,
2784
+ ensure that it is included in the factors list if
2785
+ it affects the computation graph.
2786
+
2787
+ Provide a hash that uniquely identifies all the configs
2788
+ that affect the structure of the computation
2789
+ graph from input ids/embeddings to the final hidden states,
2790
+ excluding anything before input ids/embeddings and after
2791
+ the final hidden states.
2792
+ """
2793
+ # no factors to consider.
2794
+ # this config will not affect the computation graph.
2795
+ factors: list[Any] = []
2796
+ hash_str = hashlib.md5(str(factors).encode(),
2797
+ usedforsecurity=False).hexdigest()
2798
+ return hash_str
2799
+
2800
+ def get_limit_per_prompt(self, modality: str) -> int:
2801
+ """
2802
+ Get the maximum number of input items allowed per prompt
2803
+ for the given modality.
2804
+ """
2805
+ return self.limit_per_prompt.get(
2806
+ modality,
2807
+ 999 if envs.VLLM_USE_V1 else 1,
2808
+ )
2809
+
2810
+ # TODO: Add configs to init vision tower or not.
2811
+
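The per-modality limit lookup above is a dict lookup with a version-dependent fallback. A standalone sketch of the behavior, with a `use_v1` flag standing in for `envs.VLLM_USE_V1`:

def get_limit_per_prompt(limits: dict[str, int], modality: str,
                         use_v1: bool = True) -> int:
    # Unlisted modalities default to 999 items on V1 and 1 item on V0.
    return limits.get(modality, 999 if use_v1 else 1)

limits = {"images": 16, "videos": 2}
assert get_limit_per_prompt(limits, "images") == 16
assert get_limit_per_prompt(limits, "audio") == 999
assert get_limit_per_prompt(limits, "audio", use_v1=False) == 1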
2812
+
2813
+ @config
2814
+ @dataclass
2815
+ class PoolerConfig:
2816
+ """Controls the behavior of output pooling in pooling models."""
2817
+
2818
+ pooling_type: Optional[str] = None
2819
+ """
2820
+ The pooling method of the pooling model. This should be a key in
2821
+ :class:`vllm.model_executor.layers.pooler.PoolingType`.
2822
+ """
2823
+
2824
+ normalize: Optional[bool] = None
2825
+ """
2826
+ Whether to normalize the pooled outputs. Usually, this should be set to
2827
+ ``True`` for embedding outputs.
2828
+ """
2829
+
2830
+ softmax: Optional[bool] = None
2831
+ """
2832
+ Whether to apply softmax to the pooled outputs. Usually, this should be set
2833
+ to ``True`` for classification outputs.
2834
+ """
2835
+
2836
+ step_tag_id: Optional[int] = None
2837
+ """
2838
+ If set, only the score corresponding to the ``step_tag_id`` in the
2839
+ generated sentence should be returned. Otherwise, the scores for all tokens
2840
+ are returned.
2841
+ """
2842
+
2843
+ returned_token_ids: Optional[list[int]] = None
2844
+ """
2845
+ A list of indices for the vocabulary dimensions to be extracted,
2846
+ such as the token IDs of ``good_token`` and ``bad_token`` in the
2847
+ ``math-shepherd-mistral-7b-prm`` model.
2848
+ """
2849
+
2850
+ def compute_hash(self) -> str:
2851
+ """
2852
+ WARNING: Whenever a new field is added to this config,
2853
+ ensure that it is included in the factors list if
2854
+ it affects the computation graph.
2855
+
2856
+ Provide a hash that uniquely identifies all the configs
2857
+ that affect the structure of the computation
2858
+ graph from input ids/embeddings to the final hidden states,
2859
+ excluding anything before input ids/embeddings and after
2860
+ the final hidden states.
2861
+ """
2862
+ # no factors to consider.
2863
+ # this config will not affect the computation graph.
2864
+ factors: list[Any] = []
2865
+ hash_str = hashlib.md5(str(factors).encode(),
2866
+ usedforsecurity=False).hexdigest()
2867
+ return hash_str
2868
+
2869
+ @staticmethod
2870
+ def from_json(json_str: str) -> "PoolerConfig":
2871
+ return PoolerConfig(**json.loads(json_str))
2872
+
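`from_json` simply unpacks a JSON object into the dataclass fields. A usage sketch, assuming `PoolerConfig` is importable from `vllm.config`; the `pooling_type` value is illustrative and must match a key of `PoolingType` to be usable downstream:

from vllm.config import PoolerConfig

cfg = PoolerConfig.from_json('{"pooling_type": "MEAN", "normalize": true}')
assert cfg.pooling_type == "MEAN"
assert cfg.normalize is True
assert cfg.softmax is None  # unspecified fields keep their None defaults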
2873
+
2874
+ _STR_DTYPE_TO_TORCH_DTYPE = {
2875
+ "half": torch.float16,
2876
+ "float16": torch.float16,
2877
+ "float": torch.float32,
2878
+ "float32": torch.float32,
2879
+ "bfloat16": torch.bfloat16,
2880
+ }
2881
+
2882
+ _ROCM_NOT_SUPPORTED_DTYPE: list[str] = []
2883
+
2884
+
2885
+ def _get_and_verify_dtype(
2886
+ config: PretrainedConfig,
2887
+ dtype: Union[str, torch.dtype],
2888
+ ) -> torch.dtype:
2889
+ # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
2890
+ # because config.torch_dtype can be None.
2891
+ config_dtype = getattr(config.get_text_config(), "torch_dtype", None)
2892
+
2893
+ # Fallback for multi-modal models if the root config
2894
+ # does not define torch_dtype
2895
+ if config_dtype is None and hasattr(config, "vision_config"):
2896
+ config_dtype = getattr(config.vision_config, "torch_dtype", None)
2897
+
2898
+ if config_dtype is None:
2899
+ config_dtype = torch.float32
2900
+
2901
+ if isinstance(dtype, str):
2902
+ dtype = dtype.lower()
2903
+ if dtype == "auto":
2904
+ if config_dtype == torch.float32:
2905
+ # Following common practice, we use float16 for float32 models
2906
+ torch_dtype = torch.float16
2907
+ else:
2908
+ torch_dtype = config_dtype
2909
+
2910
+ if config.model_type == "plamo2":
2911
+ logger.info(
2912
+ "For PLaMo2, we cast models to bfloat16 instead of using "
2913
+ "float16 by default. This is because float16 does not work."
2914
+ )
2915
+ torch_dtype = torch.bfloat16
2916
+
2917
+ from vllm.platforms import current_platform
2918
+ if (current_platform.is_cpu()
2919
+ and current_platform.get_cpu_architecture()
2920
+ == CpuArchEnum.POWERPC
2921
+ and (config_dtype == torch.float16
2922
+ or config_dtype == torch.float32)):
2923
+ logger.info(
2924
+ "For POWERPC, we cast models to bfloat16 instead of "
2925
+ "using float16 by default. Float16 is not currently "
2926
+ "supported for POWERPC.")
2927
+ torch_dtype = torch.bfloat16
2928
+
2929
+ # TODO: change this condition to check if the platform support bf16
2930
+ # instead of checking the OS. For instance M2 shall supports bf16
2931
+ # already. But we need to modify `cpu_extension.cmake` to activate
2932
+ # the feature in the build.
2933
+ if (current_platform.is_cpu() and sys.platform.startswith("darwin")
2934
+ and current_platform.get_cpu_architecture()
2935
+ == CpuArchEnum.ARM and config_dtype == torch.bfloat16):
2936
+ logger.info("For macOS with Apple Silicon, currently bfloat16 "
2937
+ "is not supported. Setting dtype to float16.")
2938
+ torch_dtype = torch.float16
2939
+
2940
+ if current_platform.is_hpu() and config_dtype == torch.float16:
2941
+ logger.info(
2942
+ "For HPU, we cast models to bfloat16 instead of "
2943
+ "using float16 by default. Please specify `dtype` if you "
2944
+ "want to use float16.")
2945
+ torch_dtype = torch.bfloat16
2946
+ elif dtype == "float16" and config.model_type == "plamo2":
2947
+ logger.warning(
2948
+ "For PLaMo2, using float16 is unstable and might cause "
2949
+ "unexpected behavior. Please use bfloat16 or float32 instead.")
2950
+ torch_dtype = torch.float16
2951
+ else:
2952
+ if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
2953
+ raise ValueError(f"Unknown dtype: {dtype}")
2954
+ torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
2955
+ elif isinstance(dtype, torch.dtype):
2956
+ torch_dtype = dtype
2957
+ else:
2958
+ raise ValueError(f"Unknown dtype: {dtype}")
2959
+
2960
+ # Verify the dtype.
2961
+ if torch_dtype != config_dtype:
2962
+ if torch_dtype == torch.float32:
2963
+ # Upcasting to float32 is allowed.
2964
+ logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
2965
+ pass
2966
+ elif config_dtype == torch.float32:
2967
+ # Downcasting from float32 to float16 or bfloat16 is allowed.
2968
+ logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
2969
+ pass
2970
+ else:
2971
+ # Casting between float16 and bfloat16 is allowed with a warning.
2972
+ logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
2973
+
2974
+ return torch_dtype
2975
+
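Stripped of the platform-specific overrides, the `auto` branch above applies one core rule. A standalone sketch of that rule:

import torch

def resolve_auto_dtype(config_dtype: torch.dtype) -> torch.dtype:
    # "auto" keeps the checkpoint dtype, except float32 checkpoints are
    # downcast to float16 following common practice.
    return torch.float16 if config_dtype == torch.float32 else config_dtype

assert resolve_auto_dtype(torch.float32) == torch.float16
assert resolve_auto_dtype(torch.bfloat16) == torch.bfloat16
assert resolve_auto_dtype(torch.float16) == torch.float16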
2976
+
2977
+ def _get_and_verify_max_len(
2978
+ hf_config: PretrainedConfig,
2979
+ max_model_len: Optional[int],
2980
+ disable_sliding_window: bool,
2981
+ sliding_window_len: Optional[Union[int, list[Optional[int]]]],
2982
+ spec_target_max_model_len: Optional[int] = None,
2983
+ encoder_config: Optional[Any] = None,
2984
+ ) -> int:
2985
+ """Get and verify the model's maximum length."""
2986
+ derived_max_model_len = float("inf")
2987
+ possible_keys = [
2988
+ # OPT
2989
+ "max_position_embeddings",
2990
+ # GPT-2
2991
+ "n_positions",
2992
+ # MPT
2993
+ "max_seq_len",
2994
+ # ChatGLM2
2995
+ "seq_length",
2996
+ # Command-R
2997
+ "model_max_length",
2998
+ # Whisper
2999
+ "max_target_positions",
3000
+ # Others
3001
+ "max_sequence_length",
3002
+ "max_seq_length",
3003
+ "seq_len",
3004
+ ]
3005
+ # Choose the smallest "max_length" from the possible keys.
3006
+ max_len_key = None
3007
+ for key in possible_keys:
3008
+ max_len = getattr(hf_config, key, None)
3009
+ if max_len is not None:
3010
+ max_len_key = key if max_len < derived_max_model_len \
3011
+ else max_len_key
3012
+ derived_max_model_len = min(derived_max_model_len, max_len)
3013
+ # For Command-R / Cohere, Cohere2 / Aya Vision models
3014
+ if tmp_max_len := getattr(hf_config, "model_max_length", None):
3015
+ max_len_key = "model_max_length"
3016
+ derived_max_model_len = tmp_max_len
3017
+
3018
+ # If sliding window is manually disabled, max_length should be less
3019
+ # than the sliding window length in the model config.
3020
+ if disable_sliding_window and sliding_window_len is not None:
3021
+
3022
+ sliding_window_len_min = get_min_sliding_window(sliding_window_len)
3023
+ max_len_key = "sliding_window" \
3024
+ if sliding_window_len_min < derived_max_model_len else max_len_key
3025
+ derived_max_model_len = min(derived_max_model_len,
3026
+ sliding_window_len_min)
3027
+
3028
+ # If none of the keys were found in the config, use a default and
3029
+ # log a warning.
3030
+ if derived_max_model_len == float("inf"):
3031
+ if max_model_len is not None:
3032
+ # If max_model_len is specified, we use it.
3033
+ return max_model_len
3034
+
3035
+ if spec_target_max_model_len is not None:
3036
+ # If this is a speculative draft model, we use the max model len
3037
+ # from the target model.
3038
+ return spec_target_max_model_len
3039
+
3040
+ default_max_len = 2048
3041
+ logger.warning(
3042
+ "The model's config.json does not contain any of the following "
3043
+ "keys to determine the original maximum length of the model: "
3044
+ "%s. Assuming the model's maximum length is %d.", possible_keys,
3045
+ default_max_len)
3046
+ derived_max_model_len = default_max_len
3047
+
3048
+ rope_scaling = getattr(hf_config, "rope_scaling", None)
3049
+ # NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
3050
+ # scaling, so we skip applying the scaling factor again.
3051
+ if rope_scaling is not None and "gemma3" not in hf_config.model_type:
3052
+ # No need to consider "type" key because of patch_rope_scaling when
3053
+ # loading HF config
3054
+ rope_type = rope_scaling["rope_type"]
3055
+
3056
+ if rope_type not in ("su", "longrope", "llama3"):
3057
+ if disable_sliding_window:
3058
+ # TODO(robertgshaw): Find a model that supports rope_scaling
3059
+ # with sliding window to see if this case should be allowed.
3060
+ raise NotImplementedError(
3061
+ "Disabling sliding window is not supported for models "
3062
+ "with rope_scaling. Please raise an issue so we can "
3063
+ "investigate.")
3064
+
3065
+ # NOTE: rope_type == "default" does not define factor
3066
+ # https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
3067
+ scaling_factor = rope_scaling.get("factor", 1.0)
3068
+
3069
+ if rope_type == "yarn":
3070
+ derived_max_model_len = rope_scaling[
3071
+ "original_max_position_embeddings"]
3072
+ derived_max_model_len *= scaling_factor
3073
+
3074
+ if encoder_config and "max_seq_length" in encoder_config:
3075
+ derived_max_model_len = encoder_config["max_seq_length"]
3076
+
3077
+ # If the user specified a max length, make sure it is smaller than the
3078
+ # derived length from the HF model config.
3079
+ if max_model_len is None:
3080
+ max_model_len = int(derived_max_model_len)
3081
+ elif max_model_len > derived_max_model_len:
3082
+ # Some models might have a separate key for specifying model_max_length
3083
+ # that will be bigger than derived_max_model_len. We compare user input
3084
+ # with model_max_length and allow this override when it's smaller.
3085
+ model_max_length = getattr(hf_config, "model_max_length", None)
3086
+ if model_max_length is not None and max_model_len <= model_max_length:
3087
+ if disable_sliding_window:
3088
+ # TODO(robertgshaw): Find a model that has model_max_length
3089
+ # with sliding window to see if this case should be allowed.
3090
+ raise NotImplementedError(
3091
+ "Disabling sliding window is not supported for models "
3092
+ "model_max_length in the config. Please raise an issue "
3093
+ "so we can investigate.")
3094
+ else:
3095
+ msg = (
3096
+ f"User-specified max_model_len ({max_model_len}) is greater "
3097
+ f"than the derived max_model_len ({max_len_key}="
3098
+ f"{derived_max_model_len} or model_max_length="
3099
+ f"{model_max_length} in model's config.json). This may lead "
3100
+ "to incorrect model outputs or CUDA errors.")
3101
+ if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
3102
+ logger.warning(
3103
+ "%s Make sure the value is correct and within the "
3104
+ "model context size.", msg)
3105
+ else:
3106
+ raise ValueError(
3107
+ f"{msg} To allow overriding this maximum, set "
3108
+ "the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1")
3109
+ return int(max_model_len)
3110
+
3111
+
3112
+ def get_min_sliding_window(
3113
+ sliding_window: Union[int, list[Optional[int]]]) -> int:
3114
+ if isinstance(sliding_window, list):
3115
+ return min(s for s in sliding_window if s is not None)
3116
+
3117
+ return sliding_window
3118
+
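Models that interleave sliding-window and full-attention layers express the latter as `None` entries; only the windowed layers count toward the minimum. A usage sketch, assuming the function is importable from `vllm.config`:

from vllm.config import get_min_sliding_window

assert get_min_sliding_window([4096, None, 2048, None]) == 2048
assert get_min_sliding_window(1024) == 1024  # scalar passes through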
3119
+
3120
+ def get_served_model_name(model: str,
3121
+ served_model_name: Optional[Union[str, list[str]]]):
3122
+ """
3123
+ If the input is a non-empty list, the first model_name in
3124
+ `served_model_name` is taken.
3125
+ If the input is a non-empty string, it is used directly.
3126
+ For cases where the input is either an empty string or an
3127
+ empty list, the fallback is to use `model`.
3128
+ """
3129
+ if not served_model_name:
3130
+ return model
3131
+ if isinstance(served_model_name, list):
3132
+ return served_model_name[0]
3133
+ return served_model_name
3134
+
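A usage sketch of the precedence described above, assuming the function is importable from `vllm.config` (the model names are illustrative):

from vllm.config import get_served_model_name

model = "org/some-model"
assert get_served_model_name(model, None) == model          # fallback
assert get_served_model_name(model, "alias") == "alias"     # plain string
assert get_served_model_name(model, ["a", "b"]) == "a"      # first of list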
3135
+
3136
+ GuidedDecodingBackendV0 = Literal["auto", "outlines", "lm-format-enforcer",
3137
+ "xgrammar", "guidance"]
3138
+ GuidedDecodingBackendV1 = Literal["auto", "xgrammar", "guidance"]
3139
+
3140
+
3141
+ @config
3142
+ @dataclass
3143
+ class DecodingConfig:
3144
+ """Dataclass which contains the decoding strategy of the engine."""
3145
+
3146
+ guided_decoding_backend: Union[
3147
+ GuidedDecodingBackendV0,
3148
+ GuidedDecodingBackendV1] = "auto" if envs.VLLM_USE_V1 else "xgrammar"
3149
+ """Which engine will be used for guided decoding (JSON schema / regex etc)
3150
+ by default. With "auto", we will make opinionated choices based on request
3151
+ contents and what the backend libraries currently support, so the behavior
3152
+ is subject to change in each release."""
3153
+
3154
+ reasoning_backend: Optional[str] = None
3155
+ """Select the reasoning parser depending on the model that you're using.
3156
+ This is used to parse the reasoning content into OpenAI API format.
3157
+ Required for `--enable-reasoning`."""
3158
+
3159
+ def compute_hash(self) -> str:
3160
+ """
3161
+ WARNING: Whenever a new field is added to this config,
3162
+ ensure that it is included in the factors list if
3163
+ it affects the computation graph.
3164
+
3165
+ Provide a hash that uniquely identifies all the configs
3166
+ that affect the structure of the computation
3167
+ graph from input ids/embeddings to the final hidden states,
3168
+ excluding anything before input ids/embeddings and after
3169
+ the final hidden states.
3170
+ """
3171
+ # no factors to consider.
3172
+ # this config will not affect the computation graph.
3173
+ factors: list[Any] = []
3174
+ hash_str = hashlib.md5(str(factors).encode(),
3175
+ usedforsecurity=False).hexdigest()
3176
+ return hash_str
3177
+
3178
+ def __post_init__(self):
3179
+ backend = GuidedDecodingParams(
3180
+ backend=self.guided_decoding_backend).backend_name
3181
+ if envs.VLLM_USE_V1:
3182
+ valid_guided_backends = get_args(GuidedDecodingBackendV1)
3183
+ else:
3184
+ valid_guided_backends = get_args(GuidedDecodingBackendV0)
3185
+ if backend not in valid_guided_backends:
3186
+ raise ValueError(f"Invalid guided_decoding_backend '{backend}',"
3187
+ f" must be one of {valid_guided_backends}")
3188
+
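The backend check in `__post_init__` above is driven by `typing.get_args` over the `Literal` alias. A standalone sketch of the same validation:

from typing import Literal, get_args

GuidedDecodingBackendV1 = Literal["auto", "xgrammar", "guidance"]

def check_backend(backend: str) -> str:
    valid = get_args(GuidedDecodingBackendV1)
    if backend not in valid:
        raise ValueError(f"Invalid guided_decoding_backend {backend!r}, "
                         f"must be one of {valid}")
    return backend

assert check_backend("xgrammar") == "xgrammar"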
3189
+
3190
+ @dataclass
3191
+ class ObservabilityConfig:
3192
+ """Configuration for observability - metrics and tracing."""
3193
+ show_hidden_metrics: bool = False
3194
+
3195
+ otlp_traces_endpoint: Optional[str] = None
3196
+
3197
+ # Collecting detailed timing information for each request can be expensive.
3198
+
3199
+ # If set, collects the model forward time for the request.
3200
+ collect_model_forward_time: bool = False
3201
+
3202
+ # If set, collects the model execute time for the request.
3203
+ collect_model_execute_time: bool = False
3204
+
3205
+ def compute_hash(self) -> str:
3206
+ """
3207
+ WARNING: Whenever a new field is added to this config,
3208
+ ensure that it is included in the factors list if
3209
+ it affects the computation graph.
3210
+
3211
+ Provide a hash that uniquely identifies all the configs
3212
+ that affect the structure of the computation
3213
+ graph from input ids/embeddings to the final hidden states,
3214
+ excluding anything before input ids/embeddings and after
3215
+ the final hidden states.
3216
+ """
3217
+ # no factors to consider.
3218
+ # this config will not affect the computation graph.
3219
+ factors: list[Any] = []
3220
+ hash_str = hashlib.md5(str(factors).encode(),
3221
+ usedforsecurity=False).hexdigest()
3222
+ return hash_str
3223
+
3224
+ def __post_init__(self):
3225
+ if not is_otel_available() and self.otlp_traces_endpoint is not None:
3226
+ raise ValueError(
3227
+ "OpenTelemetry is not available. Unable to configure "
3228
+ "'otlp_traces_endpoint'. Ensure OpenTelemetry packages are "
3229
+ f"installed. Original error:\n{otel_import_error_traceback}")
3230
+
3231
+
3232
+ class KVTransferConfig(BaseModel):
3233
+ """Configuration for distributed KV cache transfer."""
3234
+
3235
+ # The KV connector for vLLM to transmit KV caches between vLLM instances.
3236
+ kv_connector: Optional[str] = None
3237
+
3238
+ # The device used by kv connector to buffer the KV cache.
3239
+ # Currently only support 'cuda'.
3240
+ kv_buffer_device: Optional[str] = "cuda"
3241
+
3242
+ # The buffer size for TorchDistributedConnector. Measured in number of
3243
+ # bytes. Recommended value: 1e9 (about 1GB).
3244
+ kv_buffer_size: float = 1e9
3245
+
3246
+ # Whether this vLLM instance produces, consumes KV cache, or both. Choices
3247
+ # are 'kv_producer', 'kv_consumer', and 'kv_both'.
3248
+ kv_role: Optional[str] = None
3249
+
3250
+ # The rank of this vLLM instance in the KV cache transfer. Typical value:
3251
+ # 0 for prefill instance, 1 for decode instance.
3252
+ # Currently only 1P1D is supported.
3253
+ kv_rank: Optional[int] = None
3254
+
3255
+ # The number of parallel instances for KV cache transfer. For
3256
+ # PyNcclConnector, this should be 2.
3257
+ kv_parallel_size: int = 1
3258
+
3259
+ # The KV connector ip, used to build distributed connection
3260
+ kv_ip: str = "127.0.0.1"
3261
+
3262
+ # The KV connector port, used to build distributed connection
3263
+ kv_port: int = 14579
3264
+
3265
+ # any extra config that the connector may need
3266
+ kv_connector_extra_config: dict[str, Any] = {}
3267
+
3268
+ def compute_hash(self) -> str:
3269
+ """
3270
+ WARNING: Whenever a new field is added to this config,
3271
+ ensure that it is included in the factors list if
3272
+ it affects the computation graph.
3273
+
3274
+ Provide a hash that uniquely identifies all the configs
3275
+ that affect the structure of the computation
3276
+ graph from input ids/embeddings to the final hidden states,
3277
+ excluding anything before input ids/embeddings and after
3278
+ the final hidden states.
3279
+ """
3280
+ # no factors to consider.
3281
+ # this config will not affect the computation graph.
3282
+ factors: list[Any] = []
3283
+ hash_str = hashlib.md5(str(factors).encode(),
3284
+ usedforsecurity=False).hexdigest()
3285
+ return hash_str
3286
+
3287
+ @classmethod
3288
+ def from_cli(cls, cli_value: str) -> "KVTransferConfig":
3289
+ """Parse the CLI value for the kv cache transfer config."""
3290
+ return KVTransferConfig.model_validate_json(cli_value)
3291
+
3292
+ def model_post_init(self, __context: Any) -> None:
3293
+
3294
+ if self.kv_role is not None and self.kv_role not in [
3295
+ "kv_producer", "kv_consumer", "kv_both"
3296
+ ]:
3297
+ raise ValueError(
3298
+ f"Unsupported kv_role: {self.kv_role}. "
3299
+ f"Supported roles are `kv_producer`, `kv_consumer`, "
3300
+ f"and `kv_both`")
3301
+
3302
+ if self.kv_connector is not None and self.kv_role is None:
3303
+ raise ValueError("Please specify kv_disagg_role when kv_connector "
3304
+ "is set, supported roles are `kv_producer`, "
3305
+ "`kv_consumer`, and `kv_both`")
3306
+
3307
+ @property
3308
+ def is_kv_transfer_instance(self) -> bool:
3309
+ return self.kv_connector is not None and \
3310
+ self.kv_role in ["kv_producer", "kv_consumer", "kv_both"]
3311
+
3312
+ @property
3313
+ def is_kv_producer(self) -> bool:
3314
+ return self.kv_connector is not None and \
3315
+ self.kv_role in ["kv_producer", "kv_both"]
3316
+
3317
+ @property
3318
+ def is_kv_consumer(self) -> bool:
3319
+ return self.kv_connector is not None and \
3320
+ self.kv_role in ["kv_consumer", "kv_both"]
3321
+
3322
+ def get_from_extra_config(self, key, default) -> Any:
3323
+ return self.kv_connector_extra_config.get(key, default)
3324
+
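`from_cli` validates a JSON string into the pydantic model above. A usage sketch for the prefill side of a 1P1D setup, assuming the class is importable from `vllm.config`; the connector name and port are illustrative:

from vllm.config import KVTransferConfig

cfg = KVTransferConfig.from_cli(
    '{"kv_connector": "PyNcclConnector", "kv_role": "kv_producer", '
    '"kv_rank": 0, "kv_parallel_size": 2, "kv_port": 14579}')
assert cfg.is_kv_transfer_instance
assert cfg.is_kv_producer and not cfg.is_kv_consumer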
3325
+
3326
+ class CompilationLevel:
3327
+ # constants for the levels of the compilation process
3328
+ NO_COMPILATION = 0
3329
+ DYNAMO_AS_IS = 1
3330
+ DYNAMO_ONCE = 2
3331
+ PIECEWISE = 3
3332
+
3333
+
3334
+ class CompilationConfig(BaseModel):
3335
+ """
3336
+ Configuration for compilation.
3337
+ It has three parts:
3338
+ - Top-level Compilation control:
3339
+ - level: the level of compilation.
3340
+ - 0: no compilation.
3341
+ - 1: dynamo as is.
3342
+ - 2: dynamo once.
3343
+ - 3: piecewise compilation.
3344
+ - debug_dump_path: the path to dump the debug information.
3345
+ - cache_dir: the directory to store the compiled graph, to
3346
+ accelerate Inductor compilation. By default, it will use
3347
+ model-related information to generate a cache directory.
3348
+ - backend: the backend for compilation. It needs to be a string.
3349
+ - "" (empty string): use the default backend.
3350
+ - "eager"/"openxla"/...: use the specified backend registered in PyTorch.
3351
+ - "full.module.name": a qualified name which can be used to import the backend function.
3352
+ We use string to avoid serialization issues when using compilation in a distributed setting.
3353
+ When the compilation level is 1 or 2, the backend is used for the compilation directly (it sees the whole graph).
3354
+ When the compilation level is 3, the backend is used for the piecewise compilation (it sees a part of the graph).
3355
+ - custom_ops: fine-grained control over which custom ops to enable/disable.
3356
+ Use 'all' to enable all, 'none' to disable all.
3357
+ Also specify a list of custom op names to enable (prefixed with a '+'),
3358
+ or disable (prefixed with a '-').
3359
+ Examples:
3360
+ - 'all,-op1' to enable all except op1
3361
+ - 'none,+op1,+op2' to enable only op1 and op2
3362
+ By default, all custom ops are enabled when running without Inductor
3363
+ and disabled when running with Inductor (i.e. when use_inductor is True).
3364
+ - splitting_ops: a list of ops to split the full graph into subgraphs, used in piecewise compilation.
3365
+ - CudaGraph capture:
3366
+ - use_cudagraph: whether to use cudagraph inside compilation.
3367
+ - False: cudagraph inside compilation is not used.
3368
+ - True: cudagraph inside compilation is used. It requires
3369
+ that all input buffers have fixed addresses, and all
3370
+ splitting ops write their outputs to input buffers.
3371
+ Note that this is orthogonal to the cudagraph capture logic
3372
+ outside of compilation.
3373
+ TODO: move outside cudagraph logic into compilation.
3374
+ torch.compile will handle cudagraph capture logic in the future.
3375
+ - cudagraph_capture_sizes: sizes to capture cudagraph.
3376
+ - None (default): capture sizes are inferred from vllm config.
3377
+ - list[int]: capture sizes are specified as given.
3378
+ - cudagraph_num_of_warmups: number of warmup runs for cudagraph.
3379
+ It means the first several runs will be treated as warmup runs.
3380
+ Only after that, the execution will be recorded, and the recorded
3381
+ cudagraph will be used for subsequent runs.
3382
+ - cudagraph_copy_inputs: whether to copy input tensors for
3383
+ cudagraph. If the caller can guarantee that the same input buffers
3384
+ are always used, it can set this to False. Otherwise, it should
3385
+ set this to True, and the compiler will copy the input to an
3386
+ internally managed buffer. Default is False.
3387
+ - Inductor compilation:
3388
+ - use_inductor: whether to use inductor compilation.
3389
+ - False: inductor compilation is not used. graph runs in eager.
3390
+ - True: inductor compilation is used. one graph for symbolic shape
3391
+ is compiled. In addition, compile for compile_sizes,
3392
+ using configurations in inductor_compile_config.
3393
+ - compile_sizes: sizes to compile for inductor. In addition
3394
+ to integers, it also supports "cudagraph_capture_sizes" to
3395
+ specify the sizes for cudagraph capture.
3396
+ - inductor_compile_config: additional configurations for inductor.
3397
+ - None: use default configurations.
3398
+ - inductor_passes: additional passes for inductor. It is a dictionary
3399
+ from pass name to pass function qualified name. We use function
3400
+ name because the config uses json format. If we pass the config
3401
+ from Python, functions can also be passed directly via Python object
3402
+ constructor, e.g. `CompilationConfig(inductor_passes={"a": func})`
3403
+ - custom inductor passes: see PassConfig for more details
3404
+
3405
+ Why we have different sizes for cudagraph and inductor:
3406
+ - cudagraph: a cudagraph captured for a specific size can only be used
3407
+ for the same size. We need to capture all the sizes we want to use.
3408
+ - inductor: a graph compiled by inductor for a general shape can be used
3409
+ for different sizes. Inductor can also compile for specific sizes,
3410
+ where it can have more information to optimize the graph with fully
3411
+ static shapes. However, we find the general shape compilation is
3412
+ sufficient for most cases. It might be beneficial to compile for
3413
+ certain small batch sizes, where inductor is good at optimizing.
3414
+ """ # noqa
3415
+ level: int = 0
3416
+ debug_dump_path: str = ""
3417
+ cache_dir: str = ""
3418
+ backend: str = ""
3419
+ custom_ops: list[str] = Field(default_factory=list)
3420
+ splitting_ops: list[str] = Field(default=None) # type: ignore
3421
+
3422
+ use_inductor: bool = True
3423
+ compile_sizes: Optional[list[Union[int, str]]] = Field(default=None)
3424
+ inductor_compile_config: dict = Field(default_factory=dict)
3425
+ inductor_passes: dict[str, str] = Field(default_factory=dict)
3426
+
3427
+ use_cudagraph: bool = False
3428
+ cudagraph_num_of_warmups: int = 0
3429
+ cudagraph_capture_sizes: Optional[list[int]] = None
3430
+ cudagraph_copy_inputs: bool = False
3431
+
3432
+ class PassConfig(BaseModel):
3433
+ """
3434
+ Configuration for custom Inductor passes.
3435
+ This is separate from general CompilationConfig so that inductor passes
3436
+ don't all have access to full configuration - that would create a cycle
3437
+ as the PassManager is set as a property of config.
3438
+ - dump_graph_stages: list of stages for which we want to dump the graph.
3439
+ Each pass defines its own stages (before, after, maybe in-between).
3440
+ - dump_graph_dir: directory to dump the graphs. Default is .
3441
+ - enable_fusion: whether to enable the custom fusion pass.
3442
+ - enable_noop: whether to enable the custom no-op elimination pass.
3443
+ TODO(luka) better pass enabling system.
3444
+ - enable_sequence_parallelism: whether to enable sequence parallelism.
3445
+ """
3446
+ dump_graph_stages: list[str] = Field(default_factory=list)
3447
+ dump_graph_dir: Path = Field(default=Path("."))
3448
+ enable_fusion: bool = True
3449
+ enable_noop: bool = True
3450
+ enable_sequence_parallelism: bool = False
3451
+
3452
+ def uuid(self):
3453
+ """
3454
+ Produces a hash unique to the pass configuration.
3455
+ Any new fields that affect compilation should be added to the hash.
3456
+ Do not include dump_graph_* in the hash - they don't affect
3457
+ compilation.
3458
+ """
3459
+ dict_ = self.model_dump(include={"enable_fusion", "enable_noop", \
3460
+ "enable_sequence_parallelism"})
3461
+ return InductorPass.hash_dict(dict_)
3462
+
3463
+ def model_post_init(self, __context: Any) -> None:
3464
+ if not self.enable_noop and self.enable_fusion:
3465
+ logger.warning_once(
3466
+ "Fusion enabled but reshape elimination disabled. "
3467
+ "RMSNorm + quant (fp8) fusion might not work")
3468
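A small illustrative check, assuming the module is importable as vllm.config: two PassConfigs that differ only in a dump_graph_* field hash to the same uuid, since those fields are excluded above (the dump directory is an arbitrary placeholder):

    from pathlib import Path
    from vllm.config import CompilationConfig

    a = CompilationConfig.PassConfig()
    b = CompilationConfig.PassConfig(dump_graph_dir=Path("/tmp/vllm-graphs"))
    assert a.uuid() == b.uuid()  # dump_graph_* does not affect compilation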
+
3469
+ pass_config: PassConfig = Field(default_factory=PassConfig)
3470
+
3471
+ # not configurable, computed after init
3472
+ max_capture_size: int = PrivateAttr
3473
+ local_cache_dir: str = PrivateAttr # local cache dir for each rank
3474
+ # optimization:
3475
+ # Intuitively, bs_to_padded_graph_size should be dict[int, int].
3476
+ # since we know all keys are in a range [0, max_capture_size],
3477
+ # we can optimize it to list[int] for better lookup performance.
3478
+ bs_to_padded_graph_size: list[int] = PrivateAttr
3479
+
3480
+ # keep track of enabled and disabled custom ops
3481
+ enabled_custom_ops: Counter[str] = PrivateAttr
3482
+ disabled_custom_ops: Counter[str] = PrivateAttr
3483
+ traced_files: set[str] = PrivateAttr
3484
+ compilation_time: float = PrivateAttr
3485
+
3486
+ # Per-model forward context
3487
+ # Map from layer name to layer objects that need to be accessed outside
3488
+ # model code, e.g., Attention, FusedMOE when dp_size>1.
3489
+ static_forward_context: dict[str, Any] = PrivateAttr
3490
+
3491
+ def compute_hash(self) -> str:
3492
+ """
3493
+ WARNING: Whenever a new field is added to this config,
3494
+ ensure that it is included in the factors list if
3495
+ it affects the computation graph.
3496
+
3497
+ Provide a hash that uniquely identifies all the configs
3498
+ that affect the structure of the computation
3499
+ graph from input ids/embeddings to the final hidden states,
3500
+ excluding anything before input ids/embeddings and after
3501
+ the final hidden states.
3502
+ """
3503
+ factors: list[Any] = []
3504
+ factors.append(self.level)
3505
+ factors.append(self.backend)
3506
+ factors.append(self.custom_ops)
3507
+ factors.append(self.splitting_ops)
3508
+ factors.append(self.use_inductor)
3509
+ factors.append(self.inductor_compile_config)
3510
+ factors.append(self.inductor_passes)
3511
+ factors.append(self.pass_config.uuid())
3512
+ return hashlib.sha256(str(factors).encode()).hexdigest()
3513
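An illustrative check of the hash contract, assuming vllm.config is importable: configs that differ in a graph-affecting field such as level produce different hashes:

    from vllm.config import CompilationConfig

    assert (CompilationConfig(level=2).compute_hash()
            != CompilationConfig(level=3).compute_hash())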
+
3514
+ def __repr__(self) -> str:
3515
+ exclude = {
3516
+ "static_forward_context",
3517
+ "enabled_custom_ops",
3518
+ "disabled_custom_ops",
3519
+ "compilation_time",
3520
+ "bs_to_padded_graph_size",
3521
+ "pass_config",
3522
+ "traced_files",
3523
+ }
3524
+ return self.model_dump_json(exclude=exclude, exclude_unset=True)
3525
+
3526
+ __str__ = __repr__
3527
+
3528
+ @classmethod
3529
+ def from_cli(cls, cli_value: str) -> "CompilationConfig":
3530
+ """Parse the CLI value for the compilation config."""
3531
+ if cli_value in ["0", "1", "2", "3"]:
3532
+ return cls(level=int(cli_value))
3533
+ # do not use `eval`, it is dangerous and can execute arbitrary code
3534
+ dict_value = ast.literal_eval(cli_value)
3535
+ return CompilationConfig.model_validate(dict_value)
3536
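A sketch of the two CLI forms accepted here, with illustrative values: a bare digit selects the level, while anything else is parsed as a Python dict literal:

    from vllm.config import CompilationConfig

    cfg = CompilationConfig.from_cli("3")
    assert cfg.level == 3

    cfg = CompilationConfig.from_cli(
        "{'level': 3, 'use_cudagraph': True, 'custom_ops': ['none', '+rms_norm']}")
    assert cfg.custom_ops == ['none', '+rms_norm']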
+
3537
+ def model_post_init(self, __context: Any) -> None:
3538
+
3539
+ count_none = self.custom_ops.count("none")
3540
+ count_all = self.custom_ops.count("all")
3541
+ assert count_none + count_all <= 1, "Can only specify 'none' or 'all'"
3542
+
3543
+ # TODO(zou3519/luka): There are 2 issues with auto-functionalization V2:
3544
+ # 1. A bug in PyTorch, fixed in 2.7:
3545
+ # https://github.com/pytorch/pytorch/issues/147924
3546
+ # 2. Custom passes (fusion) rely on auto-functionalization V1 and don't
3547
+ # work with V2. Addressing this will take extra engineering effort
3548
+ # and it is not yet a priority. RFC here:
3549
+ # https://github.com/vllm-project/vllm/issues/14703
3550
+
3551
+ if is_torch_equal_or_newer("2.6"):
3552
+ KEY = 'enable_auto_functionalized_v2'
3553
+ if KEY not in self.inductor_compile_config:
3554
+ self.inductor_compile_config[KEY] = False
3555
+
3556
+ if self.splitting_ops is None:
3557
+ self.splitting_ops = []
3558
+
3559
+ for k, v in self.inductor_passes.items():
3560
+ if not isinstance(v, str):
3561
+ assert callable(v), (
3562
+ f"pass {k} should be callable or a qualified name")
3563
+ self.inductor_compile_config[k] = v if isinstance(
3564
+ v, InductorPass) else CallableInductorPass(v)
3565
+ continue
3566
+
3567
+ # resolve function from qualified name
3568
+ names = v.split(".")
3569
+ module = ".".join(names[:-1])
3570
+ func_name = names[-1]
3571
+ func = __import__(module).__dict__[func_name]
3572
+ self.inductor_compile_config[k] = func if isinstance(
3573
+ func, InductorPass) else CallableInductorPass(func)
3574
+
3575
+ self.enabled_custom_ops = Counter()
3576
+ self.disabled_custom_ops = Counter()
3577
+ self.traced_files = set()
3578
+ self.static_forward_context = {}
3579
+ self.compilation_time = 0.0
3580
+
3581
+ def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]:
3582
+ if self.level == CompilationLevel.NO_COMPILATION:
3583
+ raise ValueError("No compilation level is set.")
3584
+
3585
+ from torch._dynamo.backends.registry import list_backends
3586
+ torch_backends = list_backends(exclude_tags=tuple())
3587
+ if self.level in [
3588
+ CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE
3589
+ ]:
3590
+ if self.backend == "":
3591
+ return "eager"
3592
+ if self.backend in torch_backends:
3593
+ return self.backend
3594
+ return resolve_obj_by_qualname(self.backend)
3595
+
3596
+ # TODO: pass user-specified backend to piecewise compilation
3597
+ # merge with the config use_inductor
3598
+ assert self.level == CompilationLevel.PIECEWISE
3599
+
3600
+ from vllm.compilation.backends import VllmBackend
3601
+ return VllmBackend(vllm_config)
3602
+
3603
+ def init_with_cudagraph_sizes(self,
3604
+ cudagraph_capture_sizes: list[int]) -> None:
3605
+ """To complete the initialization of config,
3606
+ we need to know the cudagraph sizes."""
3607
+
3608
+ if self.cudagraph_capture_sizes is None:
3609
+ self.cudagraph_capture_sizes = cudagraph_capture_sizes
3610
+ else:
3611
+ # de-duplicate the sizes provided by the config
3612
+ self.cudagraph_capture_sizes = list(
3613
+ set(self.cudagraph_capture_sizes))
3614
+ logger.info(("cudagraph sizes specified by model runner"
3615
+ " %s are overridden by config %s"),
3616
+ cudagraph_capture_sizes, self.cudagraph_capture_sizes)
3617
+
3618
+ computed_compile_sizes = []
3619
+ if self.compile_sizes is not None:
3620
+ # de-duplicate the sizes provided by the config
3621
+ self.compile_sizes = list(set(self.compile_sizes))
3622
+ for x in self.compile_sizes:
3623
+ if isinstance(x, str):
3624
+ assert x == "cudagraph_capture_sizes", \
3625
+ "Unrecognized size type in compile_sizes, " \
3626
+ f"expect 'cudagraph_capture_sizes', got {x}"
3627
+ computed_compile_sizes.extend(self.cudagraph_capture_sizes)
3628
+ else:
3629
+ assert isinstance(x, int)
3630
+ computed_compile_sizes.append(x)
3631
+ self.compile_sizes = computed_compile_sizes # type: ignore
3632
+
3633
+ # sort to make sure cudagraph capture sizes are in descending order
3634
+ self.cudagraph_capture_sizes.sort(reverse=True)
3635
+ self.max_capture_size = self.cudagraph_capture_sizes[
3636
+ 0] if self.cudagraph_capture_sizes else 0
3637
+
3638
+ # pre-compute the mapping from batch size to padded graph size
3639
+ self.bs_to_padded_graph_size = [
3640
+ 0 for i in range(self.max_capture_size + 1)
3641
+ ]
3642
+ for end, start in zip(self.cudagraph_capture_sizes,
3643
+ self.cudagraph_capture_sizes[1:] + [0]):
3644
+ for bs in range(start, end):
3645
+ if bs == start:
3646
+ self.bs_to_padded_graph_size[bs] = start
3647
+ else:
3648
+ self.bs_to_padded_graph_size[bs] = end
3649
+ self.bs_to_padded_graph_size[
3650
+ self.max_capture_size] = self.max_capture_size
3651
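To make the resulting table concrete, here is a standalone sketch that mirrors the loop above for an illustrative capture list of [1, 2, 4, 8]; the pad_for_cudagraph helper later in this file simply indexes this table:

    capture_sizes = sorted([1, 2, 4, 8], reverse=True)   # [8, 4, 2, 1]
    max_capture_size = capture_sizes[0]
    bs_to_padded = [0] * (max_capture_size + 1)
    for end, start in zip(capture_sizes, capture_sizes[1:] + [0]):
        for bs in range(start, end):
            bs_to_padded[bs] = start if bs == start else end
    bs_to_padded[max_capture_size] = max_capture_size
    assert bs_to_padded == [0, 1, 2, 4, 4, 8, 8, 8, 8]
    # e.g. a batch of 3 pads to 4; batches of 5-7 pad to 8.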
+
3652
+ def set_splitting_ops_for_v1(self):
3653
+ # If default, override splitting ops for piecewise cudagraph on V1.
3654
+ # NOTE: this function needs to be called when the compilation level is
+ # CompilationLevel.PIECEWISE (see VllmConfig.__post_init__).
3655
+ if not self.splitting_ops:
3656
+ self.splitting_ops = [
3657
+ "vllm.unified_attention",
3658
+ "vllm.unified_attention_with_output",
3659
+ ]
3660
+
3661
+
3662
+ @dataclass
3663
+ class VllmConfig:
3664
+ """Dataclass which contains all vllm-related configuration. This
3665
+ simplifies passing around the distinct configurations in the codebase.
3666
+ """
3667
+
3668
+ model_config: ModelConfig = field(default=None, init=True) # type: ignore
3669
+ cache_config: CacheConfig = field(default=None, init=True) # type: ignore
3670
+ parallel_config: ParallelConfig = field(default_factory=ParallelConfig,
3671
+ init=True)
3672
+ scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig,
3673
+ init=True)
3674
+ device_config: DeviceConfig = field(default=None,
3675
+ init=True) # type: ignore
3676
+ load_config: LoadConfig = field(default=None, init=True) # type: ignore
3677
+ lora_config: Optional[LoRAConfig] = None
3678
+ speculative_config: SpeculativeConfig = field(default=None,
3679
+ init=True) # type: ignore
3680
+ decoding_config: Optional[DecodingConfig] = None
3681
+ observability_config: Optional[ObservabilityConfig] = None
3682
+ prompt_adapter_config: Optional[PromptAdapterConfig] = None
3683
+ quant_config: Optional[QuantizationConfig] = None
3684
+ compilation_config: CompilationConfig = field(default=None,
3685
+ init=True) # type: ignore
3686
+ kv_transfer_config: KVTransferConfig = field(default=None,
3687
+ init=True) # type: ignore
3688
+ # some opaque config, only used to provide additional information
3689
+ # for the hash computation, mainly used for testing, debugging or out of
3690
+ # tree config registration.
3691
+ additional_config: SupportsHash = field(default=None,
3692
+ init=True) # type: ignore
3693
+ instance_id: str = ""
3694
+
3695
+ def compute_hash(self) -> str:
3696
+ """
3697
+ WARNING: Whenever a new field is added to this config,
3698
+ ensure that it is included in the factors list if
3699
+ it affects the computation graph.
3700
+
3701
+ Provide a hash that uniquely identifies all the configs
3702
+ that affect the structure of the computation
3703
+ graph from input ids/embeddings to the final hidden states,
3704
+ excluding anything before input ids/embeddings and after
3705
+ the final hidden states.
3706
+ """
3707
+ factors: list[Any] = []
3708
+
3709
+ # summarize vllm config
3710
+ vllm_factors: list[Any] = []
3711
+ from vllm import __version__
3712
+ vllm_factors.append(__version__)
3713
+ vllm_factors.append(envs.VLLM_USE_V1)
3714
+ if self.model_config:
3715
+ vllm_factors.append(self.model_config.compute_hash())
3716
+ else:
3717
+ vllm_factors.append("None")
3718
+ if self.cache_config:
3719
+ vllm_factors.append(self.cache_config.compute_hash())
3720
+ else:
3721
+ vllm_factors.append("None")
3722
+ if self.parallel_config:
3723
+ vllm_factors.append(self.parallel_config.compute_hash())
3724
+ else:
3725
+ vllm_factors.append("None")
3726
+ if self.scheduler_config:
3727
+ vllm_factors.append(self.scheduler_config.compute_hash())
3728
+ else:
3729
+ vllm_factors.append("None")
3730
+ if self.device_config:
3731
+ vllm_factors.append(self.device_config.compute_hash())
3732
+ else:
3733
+ vllm_factors.append("None")
3734
+ if self.load_config:
3735
+ vllm_factors.append(self.load_config.compute_hash())
3736
+ else:
3737
+ vllm_factors.append("None")
3738
+ if self.lora_config:
3739
+ vllm_factors.append(self.lora_config.compute_hash())
3740
+ # LoRA creates static buffers based on max_num_batched_tokens.
3741
+ # The tensor sizes and strides get captured in the torch.compile
3742
+ # graph explicitly.
3743
+ vllm_factors.append(
3744
+ str(self.scheduler_config.max_num_batched_tokens))
3745
+ else:
3746
+ vllm_factors.append("None")
3747
+ if self.speculative_config:
3748
+ vllm_factors.append(self.speculative_config.compute_hash())
3749
+ else:
3750
+ vllm_factors.append("None")
3751
+ if self.decoding_config:
3752
+ vllm_factors.append(self.decoding_config.compute_hash())
3753
+ else:
3754
+ vllm_factors.append("None")
3755
+ if self.observability_config:
3756
+ vllm_factors.append(self.observability_config.compute_hash())
3757
+ else:
3758
+ vllm_factors.append("None")
3759
+ if self.prompt_adapter_config:
3760
+ vllm_factors.append(self.prompt_adapter_config.compute_hash())
3761
+ else:
3762
+ vllm_factors.append("None")
3763
+ if self.quant_config:
3764
+ pass # should be captured by model_config.quantization
3765
+ if self.compilation_config:
3766
+ vllm_factors.append(self.compilation_config.compute_hash())
3767
+ else:
3768
+ vllm_factors.append("None")
3769
+ if self.kv_transfer_config:
3770
+ vllm_factors.append(self.kv_transfer_config.compute_hash())
3771
+ else:
3772
+ vllm_factors.append("None")
3773
+ if self.additional_config:
3774
+ vllm_factors.append(self.additional_config.compute_hash())
3775
+ else:
3776
+ vllm_factors.append("None")
3777
+ factors.append(vllm_factors)
3778
+
3779
+ hash_str = hashlib.md5(str(factors).encode(),
3780
+ usedforsecurity=False).hexdigest()[:10]
3781
+ return hash_str
3782
+
3783
+ def pad_for_cudagraph(self, batch_size: int) -> int:
3784
+ # if batch_size > self.compilation_config.max_capture_size,
3785
+ # it should raise an IndexError.
3786
+ # the caller should make sure the batch_size is within the range,
3787
+ # i.e., batch_size <= self.compilation_config.max_capture_size
3788
+ return self.compilation_config.bs_to_padded_graph_size[batch_size]
3789
+
3790
+ @staticmethod
3791
+ def _get_quantization_config(
3792
+ model_config: ModelConfig,
3793
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
3794
+ """Get the quantization config."""
3795
+ from vllm.platforms import current_platform
3796
+ if model_config.quantization is not None:
3797
+ from vllm.model_executor.model_loader.weight_utils import (
3798
+ get_quant_config)
3799
+ quant_config = get_quant_config(model_config, load_config)
3800
+ capability_tuple = current_platform.get_device_capability()
3801
+
3802
+ if capability_tuple is not None:
3803
+ capability = capability_tuple.to_int()
3804
+ if capability < quant_config.get_min_capability():
3805
+ raise ValueError(
3806
+ f"The quantization method {model_config.quantization} "
3807
+ "is not supported for the current GPU. Minimum "
3808
+ f"capability: {quant_config.get_min_capability()}. "
3809
+ f"Current capability: {capability}.")
3810
+ supported_dtypes = quant_config.get_supported_act_dtypes()
3811
+ if model_config.dtype not in supported_dtypes:
3812
+ raise ValueError(
3813
+ f"{model_config.dtype} is not supported for quantization "
3814
+ f"method {model_config.quantization}. Supported dtypes: "
3815
+ f"{supported_dtypes}")
3816
+ return quant_config
3817
+ return None
3818
+
3819
+ @staticmethod
3820
+ def get_quantization_config(
3821
+ model_config: ModelConfig,
3822
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
3823
+ import copy
3824
+
3825
+ # For some reason, the _ version of this modifies the model_config
3826
+ # object, so using deepcopy to avoid this problem.
3827
+ return VllmConfig._get_quantization_config(copy.deepcopy(model_config),
3828
+ load_config)
3829
+
3830
+ def with_hf_config(
3831
+ self,
3832
+ hf_config: PretrainedConfig,
3833
+ architectures: Optional[list[str]] = None,
3834
+ ) -> "VllmConfig":
3835
+ if architectures is not None:
3836
+ hf_config = copy.deepcopy(hf_config)
3837
+ hf_config.architectures = architectures
3838
+
3839
+ model_config = copy.deepcopy(self.model_config)
3840
+ model_config.hf_config = hf_config
3841
+
3842
+ return replace(self, model_config=model_config)
3843
+
3844
+ def __post_init__(self):
3845
+ """Verify configs are valid & consistent with each other.
3846
+ """
3847
+ if self.model_config is not None:
3848
+ self.model_config.verify_async_output_proc(self.parallel_config,
3849
+ self.speculative_config,
3850
+ self.device_config)
3851
+ self.model_config.verify_with_parallel_config(self.parallel_config)
3852
+
3853
+ if self.cache_config is not None:
3854
+ self.cache_config.verify_with_parallel_config(self.parallel_config)
3855
+
3856
+ if self.lora_config:
3857
+ self.lora_config.verify_with_cache_config(self.cache_config)
3858
+ self.lora_config.verify_with_model_config(self.model_config)
3859
+ self.lora_config.verify_lora_support()
3860
+ if self.prompt_adapter_config:
3861
+ self.prompt_adapter_config.verify_with_model_config(
3862
+ self.model_config)
3863
+
3864
+ if self.quant_config is None and \
3865
+ self.model_config is not None and self.load_config is not None:
3866
+ self.quant_config = VllmConfig._get_quantization_config(
3867
+ self.model_config, self.load_config)
3868
+
3869
+ from vllm.platforms import current_platform
3870
+ if self.scheduler_config is not None and \
3871
+ self.model_config is not None and \
3872
+ self.scheduler_config.chunked_prefill_enabled and \
3873
+ self.model_config.dtype == torch.float32 and \
3874
+ current_platform.get_device_capability() == (7, 5):
3875
+ logger.warning_once(
3876
+ "Turing devices tensor cores do not support float32 matmul. "
3877
+ "To workaround this limitation, vLLM will set 'ieee' input "
3878
+ "precision for chunked prefill triton kernels.")
3879
+
3880
+ if self.compilation_config is None:
3881
+ self.compilation_config = CompilationConfig()
3882
+ if self.compilation_config.pass_config.enable_sequence_parallelism:
3883
+ self.compilation_config.custom_ops.append("+rms_norm")
3884
+ if envs.VLLM_USE_V1 and self.model_config is not None and \
3885
+ not self.model_config.enforce_eager:
3886
+ # NOTE(woosuk): Currently, we use inductor because the piecewise
3887
+ # CUDA graphs do not work properly with the custom CUDA kernels.
3888
+ # FIXME(woosuk): Disable inductor to reduce the compilation time
3889
+ # and avoid any potential issues with the inductor.
3890
+ # FIXME(rob): Add function to set all of these.
3891
+ if not self.compilation_config.custom_ops:
3892
+ self.compilation_config.custom_ops = ["none"]
3893
+ self.compilation_config.use_cudagraph = True
3894
+ self.compilation_config.use_inductor = True
3895
+ self.compilation_config.cudagraph_num_of_warmups = 1
3896
+ self.compilation_config.pass_config.enable_fusion = False
3897
+ self.compilation_config.pass_config.enable_noop = False
3898
+ self.compilation_config.level = CompilationLevel.PIECEWISE
3899
+ self.compilation_config.set_splitting_ops_for_v1()
3900
+
3901
+ if self.parallel_config is not None and \
3902
+ self.parallel_config.tensor_parallel_size > 1 and \
3903
+ self.parallel_config.pipeline_parallel_size > 1 and \
3904
+ self.compilation_config is not None and \
3905
+ self.compilation_config.pass_config is not None and \
3906
+ self.compilation_config.pass_config.enable_sequence_parallelism:
3907
+ logger.warning_once(
3908
+ "Sequence parallelism is not supported with pipeline "
3909
+ "parallelism. Disabling sequence parallelism.")
3910
+ self.compilation_config.pass_config.\
3911
+ enable_sequence_parallelism = False
3912
+
3913
+ self._set_cudagraph_sizes()
3914
+
3915
+ if self.cache_config is not None and \
3916
+ self.cache_config.cpu_offload_gb > 0 and \
3917
+ self.compilation_config.level != CompilationLevel.NO_COMPILATION \
3918
+ and not envs.VLLM_USE_V1:
3919
+ logger.warning(
3920
+ "CPU offload is not supported with `torch.compile` in v0 yet."
3921
+ " Disabling `torch.compile`.")
3922
+ self.compilation_config.level = CompilationLevel.NO_COMPILATION
3923
+
3924
+ if ((not envs.VLLM_USE_V1) and self.lora_config is not None
3925
+ and self.compilation_config.level
3926
+ != CompilationLevel.NO_COMPILATION):
3927
+ logger.warning(
3928
+ "LoRA for V0 is not supported with `torch.compile` yet. "
3929
+ "Disabling `torch.compile`.")
3930
+ self.compilation_config.level = CompilationLevel.NO_COMPILATION
3931
+
3932
+
3933
+ if self.model_config and self.model_config.use_mla and \
3934
+ not (current_platform.is_cuda() or current_platform.is_rocm()):
3935
+ logger.info(
3936
+ "MLA is enabled on a non-GPU platform; forcing chunked "
3937
+ "prefill and prefix caching to be disabled.")
3938
+ self.scheduler_config.enable_chunked_prefill = False
3939
+ self.scheduler_config.chunked_prefill_enabled = False
3940
+ self.scheduler_config.max_num_batched_tokens = max(
3941
+ self.scheduler_config.max_model_len,
3942
+ _DEFAULT_MAX_NUM_BATCHED_TOKENS)
3943
+
3944
+ if self.cache_config is not None:
3945
+ self.cache_config.enable_prefix_caching = False
3946
+
3947
+ current_platform.check_and_update_config(self)
3948
+
3949
+ if not self.instance_id:
3950
+ self.instance_id = random_uuid()[:5]
3951
+
3952
+ def update_sizes_for_sequence_parallelism(self,
3953
+ possible_sizes: list) -> list:
3954
+ # remove the sizes that are not multiples of tp_size when
3955
+ # sequence parallelism is enabled
3956
+ removed_sizes = [
3957
+ size for size in possible_sizes
3958
+ if size % self.parallel_config.tensor_parallel_size != 0
3959
+ ]
3960
+ if removed_sizes:
3961
+ logger.warning(
3962
+ "Batch sizes %s are removed because they are not "
3963
+ "multiples of tp_size %d when "
3964
+ "sequence parallelism is enabled", removed_sizes,
3965
+ self.parallel_config.tensor_parallel_size)
3966
+
3967
+ return [
3968
+ size for size in possible_sizes
3969
+ if size % self.parallel_config.tensor_parallel_size == 0
3970
+ ]
3971
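A tiny worked example of the filter above, with an illustrative tensor_parallel_size of 4: only multiples of 4 survive:

    tp_size = 4
    possible_sizes = [1, 2, 4, 8, 16, 24, 32]
    kept = [size for size in possible_sizes if size % tp_size == 0]
    assert kept == [4, 8, 16, 24, 32]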
+
3972
+ def _set_cudagraph_sizes(self):
3973
+ """
3974
+ cudagraph batch size padding logic:
3975
+
3976
+ `[1, 2, 4] + [8 * i for i in range(1, 1025)]` is a list of all possible
3977
+ batch sizes that cudagraph will capture.
3978
+
3979
+ Depending on the engine's configuration of `max_num_seqs`, the
3980
+ candidate batch sizes to capture cudagraph will shrink to the subset
3981
+ that just covers the range of `[1, max_num_seqs]`. In the common case,
3982
+ `max_num_seqs` is 256, and the cudagraph batch sizes will be
3983
+ `[1, 2, 4, 8, 16, 24, 32, 40, ..., 256]`.
3984
+
3985
+ However, if users specify the cudagraph capture sizes through
3986
+ compilation config, we will use the specified sizes instead.
3987
+
3988
+ In the end, `vllm_config.compilation_config.cudagraph_capture_sizes`
3989
+ will be the final sizes to capture cudagraph (in descending order).
3990
+
3991
+ During runtime, if the batch size is larger than the largest size in
3992
+ `vllm_config.compilation_config.cudagraph_capture_sizes`,
3993
+ no cudagraph will be used.
3994
+ If the batch size is no larger than the largest size in
3995
+ `vllm_config.compilation_config.cudagraph_capture_sizes`,
3996
+ we can quickly find the padded graph size for a given batch size by
3997
+ looking up `vllm_config.compilation_config.bs_to_padded_graph_size`.
3998
+ """
3999
+
4000
+ # calculate the default `batch_size_capture_list`
4001
+ if not envs.VLLM_USE_V1:
4002
+ batch_size_capture_list = []
4003
+ max_batchsize_to_capture = 0
4004
+ if self.scheduler_config is not None and \
4005
+ self.model_config is not None and \
4006
+ not self.model_config.enforce_eager:
4007
+
4008
+ possible_sizes = [1, 2, 4] + [8 * i for i in range(1, 1025)]
4009
+ if self.parallel_config.tensor_parallel_size > 1 and \
4010
+ self.compilation_config.pass_config.enable_sequence_parallelism:
4011
+ possible_sizes = self.update_sizes_for_sequence_parallelism(
4012
+ possible_sizes)
4013
+
4014
+ # find the minimum size that is larger than max_num_seqs,
4015
+ # which then becomes the max_batchsize_to_capture
4016
+ larger_sizes = [
4017
+ x for x in possible_sizes
4018
+ if x >= self.scheduler_config.max_num_seqs
4019
+ ]
4020
+ if larger_sizes:
4021
+ max_batchsize_to_capture = larger_sizes[0]
4022
+ else:
4023
+ max_batchsize_to_capture = possible_sizes[-1]
4024
+
4025
+ # filter out the sizes that are
4026
+ # larger than max_batchsize_to_capture
4027
+ batch_size_capture_list = [
4028
+ size for size in possible_sizes
4029
+ if size <= max_batchsize_to_capture
4030
+ ]
4031
+ else:
4032
+ batch_size_capture_list = []
4033
+ if self.model_config is not None and \
4034
+ not self.model_config.enforce_eager:
4035
+ batch_size_capture_list = [1, 2, 4
4036
+ ] + [i for i in range(8, 513, 8)]
4037
+ if self.parallel_config.tensor_parallel_size > 1 and \
4038
+ self.compilation_config.pass_config.enable_sequence_parallelism:
4039
+ batch_size_capture_list = \
4040
+ self.update_sizes_for_sequence_parallelism(batch_size_capture_list)
4041
+
4042
+ max_num_tokens = self.scheduler_config.max_num_batched_tokens
4043
+ batch_size_capture_list = [
4044
+ size for size in batch_size_capture_list
4045
+ if size <= max_num_tokens
4046
+ ]
4047
+
4048
+ self.compilation_config.init_with_cudagraph_sizes(
4049
+ batch_size_capture_list)
4050
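A standalone check of the V0 candidate-list arithmetic described in the docstring above, for the common max_num_seqs = 256 case:

    possible_sizes = [1, 2, 4] + [8 * i for i in range(1, 1025)]
    max_num_seqs = 256
    larger_sizes = [x for x in possible_sizes if x >= max_num_seqs]
    max_batchsize_to_capture = larger_sizes[0]            # 256
    capture_list = [s for s in possible_sizes if s <= max_batchsize_to_capture]
    assert capture_list[:6] == [1, 2, 4, 8, 16, 24]
    assert capture_list[-1] == 256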
+
4051
+ def __str__(self):
4052
+ return (
4053
+ f"model={self.model_config.model!r},"
4054
+ f" speculative_config={self.speculative_config!r},"
4055
+ f" tokenizer={self.model_config.tokenizer!r}, "
4056
+ f"skip_tokenizer_init={self.model_config.skip_tokenizer_init},"
4057
+ f" tokenizer_mode={self.model_config.tokenizer_mode}, "
4058
+ f"revision={self.model_config.revision}, "
4059
+ f"override_neuron_config={self.model_config.override_neuron_config},"
4060
+ f" tokenizer_revision={self.model_config.tokenizer_revision}, "
4061
+ f"trust_remote_code={self.model_config.trust_remote_code}, "
4062
+ f"dtype={self.model_config.dtype}, "
4063
+ f"max_seq_len={self.model_config.max_model_len},"
4064
+ f" download_dir={self.load_config.download_dir!r}, "
4065
+ f"load_format={self.load_config.load_format}, "
4066
+ f"tensor_parallel_size={self.parallel_config.tensor_parallel_size},"
4067
+ f" pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa
4068
+ f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa
4069
+ f"quantization={self.model_config.quantization}, "
4070
+ f"enforce_eager={self.model_config.enforce_eager}, "
4071
+ f"kv_cache_dtype={self.cache_config.cache_dtype}, "
4072
+ f" device_config={self.device_config.device}, "
4073
+ f"decoding_config={self.decoding_config!r}, "
4074
+ f"observability_config={self.observability_config!r}, "
4075
+ f"seed={self.model_config.seed}, "
4076
+ f"served_model_name={self.model_config.served_model_name}, "
4077
+ f"num_scheduler_steps={self.scheduler_config.num_scheduler_steps}, "
4078
+ f"multi_step_stream_outputs={self.scheduler_config.multi_step_stream_outputs}, " # noqa
4079
+ f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, "
4080
+ f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, " # noqa
4081
+ f"use_async_output_proc={self.model_config.use_async_output_proc}, "
4082
+ f"disable_mm_preprocessor_cache={self.model_config.disable_mm_preprocessor_cache!r}, " # noqa
4083
+ f"mm_processor_kwargs={self.model_config.mm_processor_kwargs}, "
4084
+ f"pooler_config={self.model_config.pooler_config!r}, "
4085
+ f"compilation_config={self.compilation_config!r}")
4086
+
4087
+
4088
+ _current_vllm_config: Optional[VllmConfig] = None
4089
+
4090
+
4091
+ @contextmanager
4092
+ def set_current_vllm_config(vllm_config: VllmConfig, check_compile=False):
4093
+ """
4094
+ Temporarily set the current vLLM config.
4095
+ Used during model initialization.
4096
+ We save the current vLLM config in a global variable,
4097
+ so that all modules can access it, e.g. custom ops
4098
+ can access the vLLM config to determine how to dispatch.
4099
+ """
4100
+ global _current_vllm_config
4101
+ old_vllm_config = _current_vllm_config
4102
+ from vllm.compilation.counter import compilation_counter
4103
+ num_models_seen = compilation_counter.num_models_seen
4104
+ try:
4105
+ _current_vllm_config = vllm_config
4106
+ yield
4107
+ except Exception:
4108
+ raise
4109
+ else:
4110
+ logger.debug("enabled custom ops: %s",
4111
+ vllm_config.compilation_config.enabled_custom_ops)
4112
+ logger.debug("disabled custom ops: %s",
4113
+ vllm_config.compilation_config.disabled_custom_ops)
4114
+ if check_compile and \
4115
+ vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \
4116
+ and compilation_counter.num_models_seen == num_models_seen:
4117
+ # If the model supports compilation,
4118
+ # compilation_counter.num_models_seen should be increased
4119
+ # by at least 1.
4120
+ # If it is not increased, it means the model does not support
4121
+ # compilation (does not have @support_torch_compile decorator).
4122
+ logger.warning(
4123
+ "`torch.compile` is turned on, but the model %s"
4124
+ " does not support it. Please open an issue on GitHub"
4125
+ " if you want it to be supported.",
4126
+ vllm_config.model_config.model)
4127
+ finally:
4128
+ _current_vllm_config = old_vllm_config
4129
+
4130
+
4131
+ def get_current_vllm_config() -> VllmConfig:
4132
+ if _current_vllm_config is None:
4133
+ # in ci, usually when we test custom ops/modules directly,
4134
+ # we don't set the vllm config. In that case, we set a default
4135
+ # config.
4136
+ logger.warning("Current vLLM config is not set.")
4137
+ from vllm.config import VllmConfig
4138
+ return VllmConfig()
4139
+ return _current_vllm_config
4140
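A minimal sketch of the intended usage, assuming a default-constructed VllmConfig is sufficient (as in the CI fallback above) and that the surrounding module is importable as vllm.config:

    from vllm.config import (VllmConfig, get_current_vllm_config,
                             set_current_vllm_config)

    config = VllmConfig()
    with set_current_vllm_config(config):
        assert get_current_vllm_config() is config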
+
4141
+
4142
+ def contains_object_print(text):
4143
+ """
4144
+ Check if the text looks like a printed Python object, e.g.
4145
+ contains any substring matching the pattern: "at 0xFFFFFFF>"
4146
+ We match against 0x followed by 2-16 hex chars (there's
4147
+ a max of 16 on a 64 bit system).
4148
+
4149
+ Args:
4150
+ text (str): The text to check
4151
+
4152
+ Returns:
4153
+ bool: True if a match is found, False otherwise
4154
+ """
4155
+ pattern = r'at 0x[a-fA-F0-9]{2,16}>'
4156
+ match = re.search(pattern, text)
4157
+ return match is not None
4158
+
4159
+
4160
+ def assert_hashable(text):
4161
+ if not contains_object_print(text):
4162
+ return True
4163
+ raise AssertionError(
4164
+ f"vLLM tried to hash some configs that may have Python objects ids "
4165
+ f"in them. This is a bug, please file an issue. "
4166
+ f"Text being hashed: {text}")
4167
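For illustration (the hex address below is made up), the check fires on a default object repr but not on plain config values:

    from vllm.config import assert_hashable, contains_object_print

    assert contains_object_print("<function foo at 0x7f2b8c0e1d30>")
    assert not contains_object_print("{'enable_fusion': True}")
    assert_hashable("{'enable_fusion': True}")  # returns True, no exception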
+
4168
+
4169
+ T = TypeVar("T")
4170
+
4171
+
4172
+ def get_layers_from_vllm_config(vllm_config: VllmConfig,
4173
+ layer_type: type[T]) -> dict[str, T]:
4174
+ return {
4175
+ layer_name: layer
4176
+ for layer_name, layer in
4177
+ vllm_config.compilation_config.static_forward_context.items()
4178
+ if isinstance(layer, layer_type)
4179
+ }
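Finally, a sketch of the lookup helper above; static_forward_context is normally populated by model code, so here it is filled in by hand and the layer name is arbitrary:

    import torch.nn as nn

    from vllm.config import VllmConfig, get_layers_from_vllm_config

    cfg = VllmConfig()
    cfg.compilation_config.static_forward_context["layers.0.proj"] = nn.Linear(4, 4)
    assert list(get_layers_from_vllm_config(cfg, nn.Linear)) == ["layers.0.proj"]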