vllm-cpu-avx512vnni 0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of vllm-cpu-avx512vnni might be problematic.

Files changed (1395)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2022 -0
  5. vllm/_ipex_ops.py +404 -0
  6. vllm/_version.py +34 -0
  7. vllm/adapter_commons/__init__.py +0 -0
  8. vllm/adapter_commons/layers.py +16 -0
  9. vllm/adapter_commons/models.py +106 -0
  10. vllm/adapter_commons/request.py +26 -0
  11. vllm/adapter_commons/utils.py +93 -0
  12. vllm/adapter_commons/worker_manager.py +39 -0
  13. vllm/assets/__init__.py +0 -0
  14. vllm/assets/audio.py +45 -0
  15. vllm/assets/base.py +41 -0
  16. vllm/assets/image.py +50 -0
  17. vllm/assets/video.py +138 -0
  18. vllm/attention/__init__.py +19 -0
  19. vllm/attention/backends/__init__.py +0 -0
  20. vllm/attention/backends/abstract.py +348 -0
  21. vllm/attention/backends/differential_flash_attn.py +935 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
  23. vllm/attention/backends/flash_attn.py +933 -0
  24. vllm/attention/backends/flashmla.py +238 -0
  25. vllm/attention/backends/mla/__init__.py +0 -0
  26. vllm/attention/backends/mla/common.py +1310 -0
  27. vllm/attention/backends/placeholder_attn.py +340 -0
  28. vllm/attention/backends/rocm_aiter_mla.py +410 -0
  29. vllm/attention/backends/rocm_flash_attn.py +953 -0
  30. vllm/attention/backends/triton_mla.py +111 -0
  31. vllm/attention/backends/utils.py +610 -0
  32. vllm/attention/backends/xformers.py +805 -0
  33. vllm/attention/layer.py +552 -0
  34. vllm/attention/layers/__init__.py +0 -0
  35. vllm/attention/layers/chunked_local_attention.py +91 -0
  36. vllm/attention/layers/cross_attention.py +159 -0
  37. vllm/attention/layers/encoder_only_attention.py +86 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  40. vllm/attention/ops/common.py +139 -0
  41. vllm/attention/ops/flashmla.py +123 -0
  42. vllm/attention/ops/merge_attn_states.py +43 -0
  43. vllm/attention/ops/paged_attn.py +261 -0
  44. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  45. vllm/attention/ops/prefix_prefill.py +928 -0
  46. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  47. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  48. vllm/attention/ops/triton_decode_attention.py +676 -0
  49. vllm/attention/ops/triton_flash_attention.py +984 -0
  50. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  51. vllm/attention/ops/triton_unified_attention.py +854 -0
  52. vllm/attention/selector.py +243 -0
  53. vllm/attention/utils/__init__.py +0 -0
  54. vllm/attention/utils/fa_utils.py +85 -0
  55. vllm/attention/utils/kv_sharing_utils.py +33 -0
  56. vllm/beam_search.py +87 -0
  57. vllm/benchmarks/__init__.py +0 -0
  58. vllm/benchmarks/datasets.py +2651 -0
  59. vllm/benchmarks/latency.py +170 -0
  60. vllm/benchmarks/lib/__init__.py +3 -0
  61. vllm/benchmarks/lib/endpoint_request_func.py +510 -0
  62. vllm/benchmarks/lib/ready_checker.py +72 -0
  63. vllm/benchmarks/lib/utils.py +80 -0
  64. vllm/benchmarks/serve.py +1247 -0
  65. vllm/benchmarks/throughput.py +696 -0
  66. vllm/collect_env.py +823 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/activation_quant_fusion.py +193 -0
  69. vllm/compilation/backends.py +641 -0
  70. vllm/compilation/base_static_graph.py +51 -0
  71. vllm/compilation/collective_fusion.py +1190 -0
  72. vllm/compilation/compiler_interface.py +572 -0
  73. vllm/compilation/counter.py +47 -0
  74. vllm/compilation/cuda_graph.py +193 -0
  75. vllm/compilation/cuda_piecewise_backend.py +117 -0
  76. vllm/compilation/decorators.py +316 -0
  77. vllm/compilation/fix_functionalization.py +208 -0
  78. vllm/compilation/fusion.py +600 -0
  79. vllm/compilation/fusion_attn.py +303 -0
  80. vllm/compilation/fx_utils.py +84 -0
  81. vllm/compilation/inductor_pass.py +136 -0
  82. vllm/compilation/monitor.py +57 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +165 -0
  85. vllm/compilation/pass_manager.py +88 -0
  86. vllm/compilation/sequence_parallelism.py +484 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +50 -0
  89. vllm/compilation/wrapper.py +138 -0
  90. vllm/config/__init__.py +3921 -0
  91. vllm/config/cache.py +214 -0
  92. vllm/config/compilation.py +580 -0
  93. vllm/config/kv_events.py +50 -0
  94. vllm/config/kv_transfer.py +111 -0
  95. vllm/config/load.py +113 -0
  96. vllm/config/lora.py +132 -0
  97. vllm/config/parallel.py +446 -0
  98. vllm/config/scheduler.py +304 -0
  99. vllm/config/utils.py +29 -0
  100. vllm/connections.py +174 -0
  101. vllm/core/__init__.py +0 -0
  102. vllm/core/block/__init__.py +0 -0
  103. vllm/core/block/block_table.py +399 -0
  104. vllm/core/block/common.py +371 -0
  105. vllm/core/block/cpu_gpu_block_allocator.py +439 -0
  106. vllm/core/block/interfaces.py +319 -0
  107. vllm/core/block/naive_block.py +466 -0
  108. vllm/core/block/prefix_caching_block.py +1135 -0
  109. vllm/core/block/utils.py +28 -0
  110. vllm/core/block_manager.py +523 -0
  111. vllm/core/evictor.py +157 -0
  112. vllm/core/interfaces.py +139 -0
  113. vllm/core/placeholder_block_space_manager.py +103 -0
  114. vllm/core/scheduler.py +2028 -0
  115. vllm/device_allocator/__init__.py +0 -0
  116. vllm/device_allocator/cumem.py +286 -0
  117. vllm/distributed/__init__.py +6 -0
  118. vllm/distributed/communication_op.py +41 -0
  119. vllm/distributed/device_communicators/__init__.py +0 -0
  120. vllm/distributed/device_communicators/all2all.py +259 -0
  121. vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
  122. vllm/distributed/device_communicators/base_device_communicator.py +277 -0
  123. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  124. vllm/distributed/device_communicators/cuda_communicator.py +294 -0
  125. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  126. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  127. vllm/distributed/device_communicators/pynccl.py +290 -0
  128. vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
  129. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  130. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  131. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  132. vllm/distributed/device_communicators/symm_mem.py +136 -0
  133. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  134. vllm/distributed/device_communicators/xpu_communicator.py +69 -0
  135. vllm/distributed/eplb/__init__.py +8 -0
  136. vllm/distributed/eplb/eplb_state.py +619 -0
  137. vllm/distributed/eplb/rebalance_algo.py +234 -0
  138. vllm/distributed/eplb/rebalance_execute.py +424 -0
  139. vllm/distributed/kv_events.py +362 -0
  140. vllm/distributed/kv_transfer/README.md +29 -0
  141. vllm/distributed/kv_transfer/__init__.py +13 -0
  142. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  143. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  145. vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
  146. vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  158. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  159. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  160. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  161. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  162. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  163. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  164. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  165. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  166. vllm/distributed/parallel_state.py +1489 -0
  167. vllm/distributed/tpu_distributed_utils.py +178 -0
  168. vllm/distributed/utils.py +536 -0
  169. vllm/engine/__init__.py +0 -0
  170. vllm/engine/arg_utils.py +1857 -0
  171. vllm/engine/async_llm_engine.py +1044 -0
  172. vllm/engine/async_timeout.py +173 -0
  173. vllm/engine/llm_engine.py +1849 -0
  174. vllm/engine/metrics.py +577 -0
  175. vllm/engine/metrics_types.py +84 -0
  176. vllm/engine/multiprocessing/__init__.py +145 -0
  177. vllm/engine/multiprocessing/client.py +643 -0
  178. vllm/engine/multiprocessing/engine.py +470 -0
  179. vllm/engine/output_processor/__init__.py +0 -0
  180. vllm/engine/output_processor/interfaces.py +61 -0
  181. vllm/engine/output_processor/single_step.py +145 -0
  182. vllm/engine/output_processor/stop_checker.py +131 -0
  183. vllm/engine/output_processor/util.py +28 -0
  184. vllm/engine/protocol.py +343 -0
  185. vllm/entrypoints/__init__.py +0 -0
  186. vllm/entrypoints/api_server.py +178 -0
  187. vllm/entrypoints/chat_utils.py +1535 -0
  188. vllm/entrypoints/cli/__init__.py +12 -0
  189. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  190. vllm/entrypoints/cli/benchmark/base.py +25 -0
  191. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  192. vllm/entrypoints/cli/benchmark/main.py +58 -0
  193. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  194. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  195. vllm/entrypoints/cli/collect_env.py +36 -0
  196. vllm/entrypoints/cli/main.py +60 -0
  197. vllm/entrypoints/cli/openai.py +214 -0
  198. vllm/entrypoints/cli/run_batch.py +69 -0
  199. vllm/entrypoints/cli/serve.py +232 -0
  200. vllm/entrypoints/cli/types.py +29 -0
  201. vllm/entrypoints/constants.py +10 -0
  202. vllm/entrypoints/context.py +444 -0
  203. vllm/entrypoints/harmony_utils.py +431 -0
  204. vllm/entrypoints/launcher.py +168 -0
  205. vllm/entrypoints/llm.py +1579 -0
  206. vllm/entrypoints/logger.py +79 -0
  207. vllm/entrypoints/openai/__init__.py +0 -0
  208. vllm/entrypoints/openai/api_server.py +2011 -0
  209. vllm/entrypoints/openai/cli_args.py +281 -0
  210. vllm/entrypoints/openai/logits_processors.py +90 -0
  211. vllm/entrypoints/openai/protocol.py +2590 -0
  212. vllm/entrypoints/openai/run_batch.py +497 -0
  213. vllm/entrypoints/openai/serving_chat.py +1591 -0
  214. vllm/entrypoints/openai/serving_classification.py +176 -0
  215. vllm/entrypoints/openai/serving_completion.py +688 -0
  216. vllm/entrypoints/openai/serving_embedding.py +632 -0
  217. vllm/entrypoints/openai/serving_engine.py +996 -0
  218. vllm/entrypoints/openai/serving_models.py +288 -0
  219. vllm/entrypoints/openai/serving_pooling.py +277 -0
  220. vllm/entrypoints/openai/serving_responses.py +1690 -0
  221. vllm/entrypoints/openai/serving_score.py +479 -0
  222. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  223. vllm/entrypoints/openai/serving_transcription.py +136 -0
  224. vllm/entrypoints/openai/speech_to_text.py +388 -0
  225. vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
  226. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  227. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  228. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  229. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  230. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  231. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  232. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
  233. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  234. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  235. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  236. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  237. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  238. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  239. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  240. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  241. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
  242. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  243. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  244. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  245. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  246. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  247. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  248. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  249. vllm/entrypoints/renderer.py +395 -0
  250. vllm/entrypoints/score_utils.py +232 -0
  251. vllm/entrypoints/ssl.py +75 -0
  252. vllm/entrypoints/tool.py +139 -0
  253. vllm/entrypoints/tool_server.py +195 -0
  254. vllm/entrypoints/utils.py +328 -0
  255. vllm/env_override.py +23 -0
  256. vllm/envs.py +1354 -0
  257. vllm/executor/__init__.py +0 -0
  258. vllm/executor/executor_base.py +378 -0
  259. vllm/executor/mp_distributed_executor.py +244 -0
  260. vllm/executor/msgspec_utils.py +35 -0
  261. vllm/executor/multiproc_worker_utils.py +279 -0
  262. vllm/executor/ray_distributed_executor.py +699 -0
  263. vllm/executor/ray_utils.py +410 -0
  264. vllm/executor/uniproc_executor.py +152 -0
  265. vllm/forward_context.py +273 -0
  266. vllm/inputs/__init__.py +44 -0
  267. vllm/inputs/data.py +356 -0
  268. vllm/inputs/parse.py +151 -0
  269. vllm/inputs/preprocess.py +973 -0
  270. vllm/inputs/registry.py +251 -0
  271. vllm/logger.py +229 -0
  272. vllm/logging_utils/__init__.py +8 -0
  273. vllm/logging_utils/dump_input.py +81 -0
  274. vllm/logging_utils/formatter.py +79 -0
  275. vllm/logits_process.py +119 -0
  276. vllm/logprobs.py +28 -0
  277. vllm/lora/__init__.py +0 -0
  278. vllm/lora/layers/__init__.py +34 -0
  279. vllm/lora/layers/base.py +69 -0
  280. vllm/lora/layers/base_linear.py +184 -0
  281. vllm/lora/layers/column_parallel_linear.py +622 -0
  282. vllm/lora/layers/logits_processor.py +247 -0
  283. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  284. vllm/lora/layers/replicated_linear.py +61 -0
  285. vllm/lora/layers/row_parallel_linear.py +201 -0
  286. vllm/lora/layers/utils.py +60 -0
  287. vllm/lora/layers/vocal_parallel_embedding.py +172 -0
  288. vllm/lora/lora.py +199 -0
  289. vllm/lora/models.py +792 -0
  290. vllm/lora/ops/__init__.py +0 -0
  291. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  292. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  293. vllm/lora/ops/torch_ops/__init__.py +16 -0
  294. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  295. vllm/lora/ops/triton_ops/__init__.py +12 -0
  296. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  297. vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
  298. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  299. vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
  300. vllm/lora/ops/triton_ops/utils.py +126 -0
  301. vllm/lora/ops/xla_ops/__init__.py +7 -0
  302. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  303. vllm/lora/peft_helper.py +127 -0
  304. vllm/lora/punica_wrapper/__init__.py +10 -0
  305. vllm/lora/punica_wrapper/punica_base.py +458 -0
  306. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  307. vllm/lora/punica_wrapper/punica_gpu.py +279 -0
  308. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  309. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  310. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  311. vllm/lora/punica_wrapper/utils.py +136 -0
  312. vllm/lora/request.py +99 -0
  313. vllm/lora/resolver.py +85 -0
  314. vllm/lora/utils.py +246 -0
  315. vllm/lora/worker_manager.py +256 -0
  316. vllm/model_executor/__init__.py +16 -0
  317. vllm/model_executor/custom_op.py +194 -0
  318. vllm/model_executor/layers/__init__.py +0 -0
  319. vllm/model_executor/layers/activation.py +575 -0
  320. vllm/model_executor/layers/attention_layer_base.py +23 -0
  321. vllm/model_executor/layers/fla/__init__.py +8 -0
  322. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  323. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  324. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  325. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  326. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  327. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  328. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  329. vllm/model_executor/layers/fla/ops/index.py +39 -0
  330. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  331. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  332. vllm/model_executor/layers/fla/ops/op.py +39 -0
  333. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  334. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  335. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  336. vllm/model_executor/layers/fused_moe/__init__.py +80 -0
  337. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
  338. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
  339. vllm/model_executor/layers/fused_moe/config.py +497 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  560. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
  561. vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
  562. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
  563. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  564. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
  565. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
  566. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
  567. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
  568. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
  569. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
  570. vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
  571. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
  572. vllm/model_executor/layers/fused_moe/layer.py +1951 -0
  573. vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
  574. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  575. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  576. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  577. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  578. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
  579. vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
  580. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
  581. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  582. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  583. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
  584. vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
  585. vllm/model_executor/layers/fused_moe/utils.py +270 -0
  586. vllm/model_executor/layers/layernorm.py +381 -0
  587. vllm/model_executor/layers/lightning_attn.py +661 -0
  588. vllm/model_executor/layers/linear.py +1567 -0
  589. vllm/model_executor/layers/logits_processor.py +199 -0
  590. vllm/model_executor/layers/mamba/__init__.py +0 -0
  591. vllm/model_executor/layers/mamba/abstract.py +45 -0
  592. vllm/model_executor/layers/mamba/linear_attn.py +432 -0
  593. vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
  594. vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
  595. vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
  596. vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
  597. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  598. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
  599. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  600. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  601. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  602. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
  603. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  604. vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
  605. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
  606. vllm/model_executor/layers/mamba/short_conv.py +270 -0
  607. vllm/model_executor/layers/mla.py +158 -0
  608. vllm/model_executor/layers/pooler.py +732 -0
  609. vllm/model_executor/layers/quantization/__init__.py +157 -0
  610. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  611. vllm/model_executor/layers/quantization/awq.py +228 -0
  612. vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
  613. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  614. vllm/model_executor/layers/quantization/base_config.py +164 -0
  615. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  616. vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  625. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
  626. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  627. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  628. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  629. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
  630. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  631. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  632. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
  633. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
  634. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
  635. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  636. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  637. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  638. vllm/model_executor/layers/quantization/deepgemm.py +81 -0
  639. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  640. vllm/model_executor/layers/quantization/experts_int8.py +215 -0
  641. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  642. vllm/model_executor/layers/quantization/fp8.py +1179 -0
  643. vllm/model_executor/layers/quantization/gguf.py +597 -0
  644. vllm/model_executor/layers/quantization/gptq.py +300 -0
  645. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  646. vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
  647. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  648. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  649. vllm/model_executor/layers/quantization/inc.py +61 -0
  650. vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
  651. vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
  652. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  653. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  654. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  655. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  656. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  657. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  658. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  659. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  660. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  661. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  662. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  663. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  664. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  665. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
  666. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  667. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  668. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  669. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  670. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  671. vllm/model_executor/layers/quantization/modelopt.py +1548 -0
  672. vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
  673. vllm/model_executor/layers/quantization/mxfp4.py +951 -0
  674. vllm/model_executor/layers/quantization/petit.py +306 -0
  675. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  676. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  677. vllm/model_executor/layers/quantization/quark/quark.py +431 -0
  678. vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
  679. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  680. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  681. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
  682. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  683. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  684. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  685. vllm/model_executor/layers/quantization/rtn.py +456 -0
  686. vllm/model_executor/layers/quantization/schema.py +86 -0
  687. vllm/model_executor/layers/quantization/torchao.py +214 -0
  688. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  689. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  690. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  691. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  902. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  903. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
  904. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
  905. vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
  906. vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
  907. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  908. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  909. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  910. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  911. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  912. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  913. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  914. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  915. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
  916. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  917. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  918. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  919. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  920. vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
  921. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  922. vllm/model_executor/layers/resampler.py +270 -0
  923. vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
  924. vllm/model_executor/layers/rotary_embedding/base.py +156 -0
  925. vllm/model_executor/layers/rotary_embedding/common.py +105 -0
  926. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
  927. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  928. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  929. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  930. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  931. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  932. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  933. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  934. vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
  935. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  936. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  937. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  938. vllm/model_executor/layers/sampler.py +1198 -0
  939. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  940. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  941. vllm/model_executor/layers/utils.py +196 -0
  942. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  943. vllm/model_executor/model_loader/__init__.py +138 -0
  944. vllm/model_executor/model_loader/base_loader.py +52 -0
  945. vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
  946. vllm/model_executor/model_loader/default_loader.py +278 -0
  947. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  948. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  949. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  950. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  951. vllm/model_executor/model_loader/tensorizer.py +743 -0
  952. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  953. vllm/model_executor/model_loader/tpu.py +114 -0
  954. vllm/model_executor/model_loader/utils.py +271 -0
  955. vllm/model_executor/model_loader/weight_utils.py +946 -0
  956. vllm/model_executor/models/__init__.py +30 -0
  957. vllm/model_executor/models/adapters.py +542 -0
  958. vllm/model_executor/models/aimv2.py +246 -0
  959. vllm/model_executor/models/apertus.py +582 -0
  960. vllm/model_executor/models/arcee.py +423 -0
  961. vllm/model_executor/models/arctic.py +560 -0
  962. vllm/model_executor/models/aria.py +662 -0
  963. vllm/model_executor/models/aya_vision.py +470 -0
  964. vllm/model_executor/models/baichuan.py +475 -0
  965. vllm/model_executor/models/bailing_moe.py +529 -0
  966. vllm/model_executor/models/bamba.py +582 -0
  967. vllm/model_executor/models/bart.py +1343 -0
  968. vllm/model_executor/models/bert.py +613 -0
  969. vllm/model_executor/models/bert_with_rope.py +687 -0
  970. vllm/model_executor/models/blip.py +339 -0
  971. vllm/model_executor/models/blip2.py +716 -0
  972. vllm/model_executor/models/bloom.py +374 -0
  973. vllm/model_executor/models/chameleon.py +1141 -0
  974. vllm/model_executor/models/chatglm.py +479 -0
  975. vllm/model_executor/models/clip.py +407 -0
  976. vllm/model_executor/models/cohere2_vision.py +484 -0
  977. vllm/model_executor/models/commandr.py +467 -0
  978. vllm/model_executor/models/config.py +434 -0
  979. vllm/model_executor/models/constant_size_cache.py +137 -0
  980. vllm/model_executor/models/dbrx.py +473 -0
  981. vllm/model_executor/models/deepseek.py +491 -0
  982. vllm/model_executor/models/deepseek_eagle.py +241 -0
  983. vllm/model_executor/models/deepseek_mtp.py +282 -0
  984. vllm/model_executor/models/deepseek_v2.py +1058 -0
  985. vllm/model_executor/models/deepseek_vl2.py +661 -0
  986. vllm/model_executor/models/donut.py +387 -0
  987. vllm/model_executor/models/dots1.py +547 -0
  988. vllm/model_executor/models/ernie45.py +43 -0
  989. vllm/model_executor/models/ernie45_moe.py +608 -0
  990. vllm/model_executor/models/ernie45_vl.py +1510 -0
  991. vllm/model_executor/models/ernie45_vl_moe.py +728 -0
  992. vllm/model_executor/models/ernie_mtp.py +287 -0
  993. vllm/model_executor/models/exaone.py +552 -0
  994. vllm/model_executor/models/exaone4.py +535 -0
  995. vllm/model_executor/models/fairseq2_llama.py +154 -0
  996. vllm/model_executor/models/falcon.py +511 -0
  997. vllm/model_executor/models/falcon_h1.py +739 -0
  998. vllm/model_executor/models/florence2.py +1107 -0
  999. vllm/model_executor/models/fuyu.py +401 -0
  1000. vllm/model_executor/models/gemma.py +428 -0
  1001. vllm/model_executor/models/gemma2.py +425 -0
  1002. vllm/model_executor/models/gemma3.py +542 -0
  1003. vllm/model_executor/models/gemma3_mm.py +723 -0
  1004. vllm/model_executor/models/gemma3n.py +830 -0
  1005. vllm/model_executor/models/gemma3n_mm.py +767 -0
  1006. vllm/model_executor/models/glm.py +23 -0
  1007. vllm/model_executor/models/glm4.py +305 -0
  1008. vllm/model_executor/models/glm4_1v.py +1669 -0
  1009. vllm/model_executor/models/glm4_moe.py +703 -0
  1010. vllm/model_executor/models/glm4_moe_mtp.py +306 -0
  1011. vllm/model_executor/models/glm4v.py +654 -0
  1012. vllm/model_executor/models/gpt2.py +383 -0
  1013. vllm/model_executor/models/gpt_bigcode.py +346 -0
  1014. vllm/model_executor/models/gpt_j.py +340 -0
  1015. vllm/model_executor/models/gpt_neox.py +333 -0
  1016. vllm/model_executor/models/gpt_oss.py +687 -0
  1017. vllm/model_executor/models/granite.py +498 -0
  1018. vllm/model_executor/models/granite_speech.py +799 -0
  1019. vllm/model_executor/models/granitemoe.py +541 -0
  1020. vllm/model_executor/models/granitemoehybrid.py +684 -0
  1021. vllm/model_executor/models/granitemoeshared.py +342 -0
  1022. vllm/model_executor/models/gritlm.py +262 -0
  1023. vllm/model_executor/models/grok1.py +550 -0
  1024. vllm/model_executor/models/h2ovl.py +536 -0
  1025. vllm/model_executor/models/hunyuan_v1.py +937 -0
  1026. vllm/model_executor/models/hyperclovax_vision.py +1206 -0
  1027. vllm/model_executor/models/idefics2_vision_model.py +416 -0
  1028. vllm/model_executor/models/idefics3.py +758 -0
  1029. vllm/model_executor/models/interfaces.py +854 -0
  1030. vllm/model_executor/models/interfaces_base.py +195 -0
  1031. vllm/model_executor/models/intern_vit.py +481 -0
  1032. vllm/model_executor/models/internlm2.py +453 -0
  1033. vllm/model_executor/models/internlm2_ve.py +148 -0
  1034. vllm/model_executor/models/interns1.py +832 -0
  1035. vllm/model_executor/models/interns1_vit.py +418 -0
  1036. vllm/model_executor/models/internvl.py +1423 -0
  1037. vllm/model_executor/models/jais.py +374 -0
  1038. vllm/model_executor/models/jamba.py +630 -0
  1039. vllm/model_executor/models/jina_vl.py +144 -0
  1040. vllm/model_executor/models/keye.py +1684 -0
  1041. vllm/model_executor/models/keye_vl1_5.py +601 -0
  1042. vllm/model_executor/models/kimi_vl.py +620 -0
  1043. vllm/model_executor/models/lfm2.py +558 -0
  1044. vllm/model_executor/models/llama.py +671 -0
  1045. vllm/model_executor/models/llama4.py +732 -0
  1046. vllm/model_executor/models/llama4_eagle.py +241 -0
  1047. vllm/model_executor/models/llama_eagle.py +171 -0
  1048. vllm/model_executor/models/llama_eagle3.py +292 -0
  1049. vllm/model_executor/models/llava.py +872 -0
  1050. vllm/model_executor/models/llava_next.py +572 -0
  1051. vllm/model_executor/models/llava_next_video.py +479 -0
  1052. vllm/model_executor/models/llava_onevision.py +945 -0
  1053. vllm/model_executor/models/mamba.py +310 -0
  1054. vllm/model_executor/models/mamba2.py +346 -0
  1055. vllm/model_executor/models/mamba_cache.py +83 -0
  1056. vllm/model_executor/models/medusa.py +219 -0
  1057. vllm/model_executor/models/midashenglm.py +788 -0
  1058. vllm/model_executor/models/mimo.py +191 -0
  1059. vllm/model_executor/models/mimo_mtp.py +273 -0
  1060. vllm/model_executor/models/minicpm.py +593 -0
  1061. vllm/model_executor/models/minicpm3.py +230 -0
  1062. vllm/model_executor/models/minicpm_eagle.py +391 -0
  1063. vllm/model_executor/models/minicpmo.py +804 -0
  1064. vllm/model_executor/models/minicpmv.py +1786 -0
  1065. vllm/model_executor/models/minimax_cache.py +36 -0
  1066. vllm/model_executor/models/minimax_text_01.py +1027 -0
  1067. vllm/model_executor/models/minimax_vl_01.py +431 -0
  1068. vllm/model_executor/models/mistral3.py +628 -0
  1069. vllm/model_executor/models/mixtral.py +494 -0
  1070. vllm/model_executor/models/mllama.py +1697 -0
  1071. vllm/model_executor/models/mllama4.py +1079 -0
  1072. vllm/model_executor/models/mlp_speculator.py +206 -0
  1073. vllm/model_executor/models/modernbert.py +374 -0
  1074. vllm/model_executor/models/module_mapping.py +72 -0
  1075. vllm/model_executor/models/molmo.py +1569 -0
  1076. vllm/model_executor/models/moonvit.py +663 -0
  1077. vllm/model_executor/models/motif.py +345 -0
  1078. vllm/model_executor/models/mpt.py +332 -0
  1079. vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
  1080. vllm/model_executor/models/nemotron.py +509 -0
  1081. vllm/model_executor/models/nemotron_h.py +633 -0
  1082. vllm/model_executor/models/nemotron_nas.py +484 -0
  1083. vllm/model_executor/models/nemotron_vl.py +655 -0
  1084. vllm/model_executor/models/nvlm_d.py +203 -0
  1085. vllm/model_executor/models/olmo.py +406 -0
  1086. vllm/model_executor/models/olmo2.py +428 -0
  1087. vllm/model_executor/models/olmoe.py +485 -0
  1088. vllm/model_executor/models/opt.py +413 -0
  1089. vllm/model_executor/models/orion.py +350 -0
  1090. vllm/model_executor/models/ovis.py +572 -0
  1091. vllm/model_executor/models/ovis2_5.py +644 -0
  1092. vllm/model_executor/models/paligemma.py +414 -0
  1093. vllm/model_executor/models/persimmon.py +345 -0
  1094. vllm/model_executor/models/phi.py +357 -0
  1095. vllm/model_executor/models/phi3.py +19 -0
  1096. vllm/model_executor/models/phi3v.py +701 -0
  1097. vllm/model_executor/models/phi4_multimodal.py +1478 -0
  1098. vllm/model_executor/models/phi4flash.py +737 -0
  1099. vllm/model_executor/models/phi4mm.py +1281 -0
  1100. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1101. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1102. vllm/model_executor/models/phimoe.py +681 -0
  1103. vllm/model_executor/models/pixtral.py +1348 -0
  1104. vllm/model_executor/models/plamo2.py +1126 -0
  1105. vllm/model_executor/models/qwen.py +363 -0
  1106. vllm/model_executor/models/qwen2.py +526 -0
  1107. vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
  1108. vllm/model_executor/models/qwen2_5_vl.py +1256 -0
  1109. vllm/model_executor/models/qwen2_audio.py +492 -0
  1110. vllm/model_executor/models/qwen2_moe.py +558 -0
  1111. vllm/model_executor/models/qwen2_rm.py +122 -0
  1112. vllm/model_executor/models/qwen2_vl.py +1512 -0
  1113. vllm/model_executor/models/qwen3.py +344 -0
  1114. vllm/model_executor/models/qwen3_moe.py +704 -0
  1115. vllm/model_executor/models/qwen3_next.py +1298 -0
  1116. vllm/model_executor/models/qwen3_next_mtp.py +285 -0
  1117. vllm/model_executor/models/qwen_vl.py +795 -0
  1118. vllm/model_executor/models/registry.py +891 -0
  1119. vllm/model_executor/models/roberta.py +252 -0
  1120. vllm/model_executor/models/rvl.py +103 -0
  1121. vllm/model_executor/models/seed_oss.py +488 -0
  1122. vllm/model_executor/models/siglip.py +524 -0
  1123. vllm/model_executor/models/siglip2navit.py +688 -0
  1124. vllm/model_executor/models/skyworkr1v.py +914 -0
  1125. vllm/model_executor/models/smolvlm.py +44 -0
  1126. vllm/model_executor/models/solar.py +506 -0
  1127. vllm/model_executor/models/stablelm.py +344 -0
  1128. vllm/model_executor/models/starcoder2.py +357 -0
  1129. vllm/model_executor/models/step3_text.py +521 -0
  1130. vllm/model_executor/models/step3_vl.py +1091 -0
  1131. vllm/model_executor/models/swin.py +475 -0
  1132. vllm/model_executor/models/tarsier.py +649 -0
  1133. vllm/model_executor/models/telechat2.py +151 -0
  1134. vllm/model_executor/models/teleflm.py +79 -0
  1135. vllm/model_executor/models/terratorch.py +294 -0
  1136. vllm/model_executor/models/transformers.py +883 -0
  1137. vllm/model_executor/models/ultravox.py +667 -0
  1138. vllm/model_executor/models/utils.py +770 -0
  1139. vllm/model_executor/models/vision.py +125 -0
  1140. vllm/model_executor/models/voxtral.py +789 -0
  1141. vllm/model_executor/models/whisper.py +966 -0
  1142. vllm/model_executor/models/zamba2.py +1056 -0
  1143. vllm/model_executor/parameter.py +599 -0
  1144. vllm/model_executor/sampling_metadata.py +597 -0
  1145. vllm/model_executor/utils.py +97 -0
  1146. vllm/model_executor/warmup/__init__.py +0 -0
  1147. vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
  1148. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1149. vllm/multimodal/__init__.py +35 -0
  1150. vllm/multimodal/audio.py +116 -0
  1151. vllm/multimodal/base.py +219 -0
  1152. vllm/multimodal/cache.py +507 -0
  1153. vllm/multimodal/hasher.py +110 -0
  1154. vllm/multimodal/image.py +130 -0
  1155. vllm/multimodal/inputs.py +979 -0
  1156. vllm/multimodal/parse.py +496 -0
  1157. vllm/multimodal/processing.py +1921 -0
  1158. vllm/multimodal/profiling.py +313 -0
  1159. vllm/multimodal/registry.py +375 -0
  1160. vllm/multimodal/utils.py +754 -0
  1161. vllm/multimodal/video.py +312 -0
  1162. vllm/outputs.py +517 -0
  1163. vllm/platforms/__init__.py +263 -0
  1164. vllm/platforms/cpu.py +353 -0
  1165. vllm/platforms/cuda.py +731 -0
  1166. vllm/platforms/interface.py +599 -0
  1167. vllm/platforms/rocm.py +504 -0
  1168. vllm/platforms/tpu.py +236 -0
  1169. vllm/platforms/xpu.py +243 -0
  1170. vllm/plugins/__init__.py +72 -0
  1171. vllm/plugins/io_processors/__init__.py +68 -0
  1172. vllm/plugins/io_processors/interface.py +67 -0
  1173. vllm/plugins/lora_resolvers/README.md +16 -0
  1174. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1175. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1176. vllm/pooling_params.py +183 -0
  1177. vllm/profiler/__init__.py +0 -0
  1178. vllm/profiler/layerwise_profile.py +375 -0
  1179. vllm/profiler/utils.py +148 -0
  1180. vllm/py.typed +2 -0
  1181. vllm/ray/__init__.py +0 -0
  1182. vllm/ray/lazy_utils.py +22 -0
  1183. vllm/ray/ray_env.py +72 -0
  1184. vllm/reasoning/__init__.py +25 -0
  1185. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1186. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1187. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1188. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1189. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1190. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1191. vllm/reasoning/mistral_reasoning_parser.py +47 -0
  1192. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1193. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1194. vllm/sampling_params.py +577 -0
  1195. vllm/scalar_type.py +349 -0
  1196. vllm/scripts.py +15 -0
  1197. vllm/sequence.py +1465 -0
  1198. vllm/tasks.py +11 -0
  1199. vllm/test_utils.py +130 -0
  1200. vllm/third_party/__init__.py +0 -0
  1201. vllm/third_party/pynvml.py +6140 -0
  1202. vllm/tracing.py +136 -0
  1203. vllm/transformers_utils/__init__.py +24 -0
  1204. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1205. vllm/transformers_utils/chat_templates/registry.py +71 -0
  1206. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1207. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1208. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1209. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1210. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1211. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1212. vllm/transformers_utils/config.py +1043 -0
  1213. vllm/transformers_utils/config_parser_base.py +20 -0
  1214. vllm/transformers_utils/configs/__init__.py +55 -0
  1215. vllm/transformers_utils/configs/arctic.py +207 -0
  1216. vllm/transformers_utils/configs/chatglm.py +72 -0
  1217. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1218. vllm/transformers_utils/configs/eagle.py +84 -0
  1219. vllm/transformers_utils/configs/falcon.py +90 -0
  1220. vllm/transformers_utils/configs/jais.py +238 -0
  1221. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1222. vllm/transformers_utils/configs/medusa.py +63 -0
  1223. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1224. vllm/transformers_utils/configs/mistral.py +165 -0
  1225. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1226. vllm/transformers_utils/configs/moonvit.py +33 -0
  1227. vllm/transformers_utils/configs/nemotron.py +205 -0
  1228. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1229. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1230. vllm/transformers_utils/configs/ovis.py +176 -0
  1231. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1232. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1233. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1234. vllm/transformers_utils/configs/speculators/base.py +91 -0
  1235. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1236. vllm/transformers_utils/configs/ultravox.py +120 -0
  1237. vllm/transformers_utils/detokenizer.py +169 -0
  1238. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1239. vllm/transformers_utils/dynamic_module.py +60 -0
  1240. vllm/transformers_utils/processor.py +245 -0
  1241. vllm/transformers_utils/processors/__init__.py +16 -0
  1242. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1243. vllm/transformers_utils/processors/ovis.py +420 -0
  1244. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1245. vllm/transformers_utils/runai_utils.py +99 -0
  1246. vllm/transformers_utils/s3_utils.py +90 -0
  1247. vllm/transformers_utils/tokenizer.py +293 -0
  1248. vllm/transformers_utils/tokenizer_base.py +149 -0
  1249. vllm/transformers_utils/tokenizer_group.py +132 -0
  1250. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1251. vllm/transformers_utils/tokenizers/mistral.py +520 -0
  1252. vllm/transformers_utils/utils.py +99 -0
  1253. vllm/triton_utils/__init__.py +16 -0
  1254. vllm/triton_utils/importing.py +95 -0
  1255. vllm/usage/__init__.py +0 -0
  1256. vllm/usage/usage_lib.py +259 -0
  1257. vllm/utils/__init__.py +3438 -0
  1258. vllm/utils/deep_gemm.py +212 -0
  1259. vllm/utils/flashinfer.py +372 -0
  1260. vllm/utils/jsontree.py +90 -0
  1261. vllm/utils/tensor_schema.py +236 -0
  1262. vllm/v1/__init__.py +0 -0
  1263. vllm/v1/attention/__init__.py +0 -0
  1264. vllm/v1/attention/backends/__init__.py +0 -0
  1265. vllm/v1/attention/backends/cpu_attn.py +922 -0
  1266. vllm/v1/attention/backends/flash_attn.py +800 -0
  1267. vllm/v1/attention/backends/flashinfer.py +1128 -0
  1268. vllm/v1/attention/backends/flex_attention.py +796 -0
  1269. vllm/v1/attention/backends/gdn_attn.py +320 -0
  1270. vllm/v1/attention/backends/linear_attn.py +68 -0
  1271. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1272. vllm/v1/attention/backends/mamba2_attn.py +224 -0
  1273. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1274. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1275. vllm/v1/attention/backends/mla/common.py +1608 -0
  1276. vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
  1277. vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
  1278. vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
  1279. vllm/v1/attention/backends/mla/flashmla.py +213 -0
  1280. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1281. vllm/v1/attention/backends/mla/triton_mla.py +175 -0
  1282. vllm/v1/attention/backends/pallas.py +413 -0
  1283. vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
  1284. vllm/v1/attention/backends/short_conv_attn.py +82 -0
  1285. vllm/v1/attention/backends/tree_attn.py +450 -0
  1286. vllm/v1/attention/backends/triton_attn.py +430 -0
  1287. vllm/v1/attention/backends/utils.py +834 -0
  1288. vllm/v1/attention/backends/xformers.py +437 -0
  1289. vllm/v1/core/__init__.py +0 -0
  1290. vllm/v1/core/block_pool.py +330 -0
  1291. vllm/v1/core/encoder_cache_manager.py +333 -0
  1292. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1293. vllm/v1/core/kv_cache_manager.py +398 -0
  1294. vllm/v1/core/kv_cache_utils.py +1169 -0
  1295. vllm/v1/core/sched/__init__.py +0 -0
  1296. vllm/v1/core/sched/async_scheduler.py +47 -0
  1297. vllm/v1/core/sched/interface.py +158 -0
  1298. vllm/v1/core/sched/output.py +162 -0
  1299. vllm/v1/core/sched/request_queue.py +224 -0
  1300. vllm/v1/core/sched/scheduler.py +1287 -0
  1301. vllm/v1/core/sched/utils.py +69 -0
  1302. vllm/v1/core/single_type_kv_cache_manager.py +670 -0
  1303. vllm/v1/cudagraph_dispatcher.py +121 -0
  1304. vllm/v1/engine/__init__.py +202 -0
  1305. vllm/v1/engine/async_llm.py +757 -0
  1306. vllm/v1/engine/coordinator.py +357 -0
  1307. vllm/v1/engine/core.py +1245 -0
  1308. vllm/v1/engine/core_client.py +1333 -0
  1309. vllm/v1/engine/detokenizer.py +300 -0
  1310. vllm/v1/engine/exceptions.py +17 -0
  1311. vllm/v1/engine/llm_engine.py +332 -0
  1312. vllm/v1/engine/logprobs.py +201 -0
  1313. vllm/v1/engine/output_processor.py +558 -0
  1314. vllm/v1/engine/parallel_sampling.py +133 -0
  1315. vllm/v1/engine/processor.py +524 -0
  1316. vllm/v1/engine/utils.py +857 -0
  1317. vllm/v1/executor/__init__.py +0 -0
  1318. vllm/v1/executor/abstract.py +126 -0
  1319. vllm/v1/executor/multiproc_executor.py +683 -0
  1320. vllm/v1/executor/ray_distributed_executor.py +109 -0
  1321. vllm/v1/kv_cache_interface.py +275 -0
  1322. vllm/v1/metrics/__init__.py +0 -0
  1323. vllm/v1/metrics/loggers.py +717 -0
  1324. vllm/v1/metrics/prometheus.py +82 -0
  1325. vllm/v1/metrics/ray_wrappers.py +133 -0
  1326. vllm/v1/metrics/reader.py +246 -0
  1327. vllm/v1/metrics/stats.py +248 -0
  1328. vllm/v1/outputs.py +147 -0
  1329. vllm/v1/pool/__init__.py +0 -0
  1330. vllm/v1/pool/metadata.py +77 -0
  1331. vllm/v1/request.py +237 -0
  1332. vllm/v1/sample/__init__.py +0 -0
  1333. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1334. vllm/v1/sample/logits_processor/builtin.py +273 -0
  1335. vllm/v1/sample/logits_processor/interface.py +97 -0
  1336. vllm/v1/sample/logits_processor/state.py +161 -0
  1337. vllm/v1/sample/metadata.py +43 -0
  1338. vllm/v1/sample/ops/__init__.py +0 -0
  1339. vllm/v1/sample/ops/bad_words.py +39 -0
  1340. vllm/v1/sample/ops/logprobs.py +26 -0
  1341. vllm/v1/sample/ops/penalties.py +43 -0
  1342. vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
  1343. vllm/v1/sample/rejection_sampler.py +623 -0
  1344. vllm/v1/sample/sampler.py +281 -0
  1345. vllm/v1/sample/tpu/__init__.py +0 -0
  1346. vllm/v1/sample/tpu/metadata.py +124 -0
  1347. vllm/v1/sample/tpu/sampler.py +213 -0
  1348. vllm/v1/serial_utils.py +395 -0
  1349. vllm/v1/spec_decode/__init__.py +0 -0
  1350. vllm/v1/spec_decode/eagle.py +740 -0
  1351. vllm/v1/spec_decode/medusa.py +66 -0
  1352. vllm/v1/spec_decode/metadata.py +62 -0
  1353. vllm/v1/spec_decode/metrics.py +191 -0
  1354. vllm/v1/spec_decode/ngram_proposer.py +157 -0
  1355. vllm/v1/spec_decode/utils.py +14 -0
  1356. vllm/v1/structured_output/__init__.py +297 -0
  1357. vllm/v1/structured_output/backend_guidance.py +245 -0
  1358. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1359. vllm/v1/structured_output/backend_outlines.py +320 -0
  1360. vllm/v1/structured_output/backend_types.py +134 -0
  1361. vllm/v1/structured_output/backend_xgrammar.py +323 -0
  1362. vllm/v1/structured_output/request.py +86 -0
  1363. vllm/v1/structured_output/utils.py +373 -0
  1364. vllm/v1/utils.py +382 -0
  1365. vllm/v1/worker/__init__.py +0 -0
  1366. vllm/v1/worker/block_table.py +221 -0
  1367. vllm/v1/worker/cpu_model_runner.py +163 -0
  1368. vllm/v1/worker/cpu_worker.py +183 -0
  1369. vllm/v1/worker/gpu_input_batch.py +821 -0
  1370. vllm/v1/worker/gpu_model_runner.py +3743 -0
  1371. vllm/v1/worker/gpu_worker.py +697 -0
  1372. vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
  1373. vllm/v1/worker/lora_model_runner_mixin.py +192 -0
  1374. vllm/v1/worker/tpu_input_batch.py +585 -0
  1375. vllm/v1/worker/tpu_model_runner.py +1947 -0
  1376. vllm/v1/worker/tpu_worker.py +340 -0
  1377. vllm/v1/worker/utils.py +290 -0
  1378. vllm/v1/worker/worker_base.py +65 -0
  1379. vllm/v1/worker/xpu_model_runner.py +53 -0
  1380. vllm/v1/worker/xpu_worker.py +179 -0
  1381. vllm/version.py +41 -0
  1382. vllm/vllm_flash_attn/.gitkeep +0 -0
  1383. vllm/worker/__init__.py +0 -0
  1384. vllm/worker/cache_engine.py +145 -0
  1385. vllm/worker/enc_dec_model_runner.py +553 -0
  1386. vllm/worker/model_runner.py +2016 -0
  1387. vllm/worker/model_runner_base.py +307 -0
  1388. vllm/worker/utils.py +49 -0
  1389. vllm/worker/worker.py +670 -0
  1390. vllm/worker/worker_base.py +651 -0
  1391. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
  1392. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
  1393. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
  1394. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
  1395. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,3921 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ # ruff: noqa: F401
5
+ import ast
6
+ import copy
7
+ import enum
8
+ import hashlib
9
+ import inspect
10
+ import json
11
+ import os
12
+ import textwrap
13
+ import warnings
14
+ from collections.abc import Mapping
15
+ from contextlib import contextmanager
16
+ from dataclasses import MISSING, Field, field, fields, is_dataclass, replace
17
+ from functools import cached_property, lru_cache
18
+ from importlib.util import find_spec
19
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Literal, Optional,
20
+ Protocol, TypeVar, Union, cast, get_args)
21
+
22
+ import regex as re
23
+ import torch
24
+ from pydantic import (ConfigDict, SkipValidation, field_validator,
25
+ model_validator)
26
+ from pydantic.dataclasses import dataclass
27
+ from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
28
+ from typing_extensions import Self, assert_never, runtime_checkable
29
+
30
+ import vllm.envs as envs
31
+ from vllm import version
32
+ from vllm.config.cache import (BlockSize, CacheConfig, CacheDType, MambaDType,
33
+ PrefixCachingHashAlgo)
34
+ from vllm.config.compilation import (CompilationConfig, CompilationLevel,
35
+ CUDAGraphMode, PassConfig)
36
+ from vllm.config.kv_events import KVEventsConfig
37
+ from vllm.config.kv_transfer import KVTransferConfig
38
+ from vllm.config.load import LoadConfig
39
+ from vllm.config.lora import LoRAConfig
40
+ from vllm.config.parallel import (DistributedExecutorBackend, EPLBConfig,
41
+ ParallelConfig)
42
+ from vllm.config.scheduler import SchedulerConfig, SchedulerPolicy
43
+ from vllm.config.utils import ConfigType, config
44
+ from vllm.logger import init_logger
45
+ from vllm.model_executor.layers.quantization import QuantizationMethods
46
+ from vllm.multimodal import MULTIMODAL_REGISTRY
47
+ from vllm.platforms import current_platform
48
+ from vllm.transformers_utils.config import (
49
+ ConfigFormat, get_config, get_hf_image_processor_config,
50
+ get_hf_text_config, get_pooling_config,
51
+ get_sentence_transformer_tokenizer_config, is_encoder_decoder,
52
+ is_interleaved, maybe_override_with_speculators_target_model,
53
+ try_get_generation_config, try_get_safetensors_metadata,
54
+ try_get_tokenizer_config, uses_mrope)
55
+ from vllm.transformers_utils.runai_utils import (ObjectStorageModel,
56
+ is_runai_obj_uri)
57
+ from vllm.transformers_utils.utils import maybe_model_redirect
58
+ from vllm.utils import (DEFAULT_MAX_NUM_BATCHED_TOKENS,
59
+ STR_DUAL_CHUNK_FLASH_ATTN_VAL, LayerBlockType,
60
+ LazyLoader, common_broadcastable_dtype, random_uuid)
61
+
62
+ if TYPE_CHECKING:
63
+ from _typeshed import DataclassInstance
64
+ from transformers.configuration_utils import PretrainedConfig
65
+
66
+ import vllm.model_executor.layers.quantization as me_quant
67
+ import vllm.model_executor.models as me_models
68
+ from vllm.model_executor.layers.quantization import QuantizationMethods
69
+ from vllm.model_executor.layers.quantization.base_config import (
70
+ QuantizationConfig)
71
+ from vllm.v1.sample.logits_processor import LogitsProcessor
72
+
73
+ HfOverrides = Union[dict, Callable[[type], type]]
74
+ else:
75
+ DataclassInstance = Any
76
+ PretrainedConfig = Any
77
+ QuantizationConfig = Any
78
+ QuantizationMethods = Any
79
+ BaseModelLoader = Any
80
+ LogitsProcessor = Any
81
+ HfOverrides = Union[dict[str, Any], Callable[[type], type]]
82
+
83
+ me_quant = LazyLoader("model_executor", globals(),
84
+ "vllm.model_executor.layers.quantization")
85
+ me_models = LazyLoader("model_executor", globals(),
86
+ "vllm.model_executor.models")
87
+
88
+ logger = init_logger(__name__)
89
+ DataclassInstanceT = TypeVar("DataclassInstanceT", bound=DataclassInstance)
90
+
91
+ TaskOption = Literal["auto", "generate", "embedding", "embed", "classify",
92
+ "score", "reward", "transcription", "draft"]
93
+
94
+ _ResolvedTask = Literal["generate", "transcription", "encode", "embed",
95
+ "classify", "reward", "draft"]
96
+
97
+ RunnerOption = Literal["auto", "generate", "pooling", "draft"]
98
+
99
+ RunnerType = Literal["generate", "pooling", "draft"]
100
+
101
+ ConvertOption = Literal["auto", "none", "embed", "classify", "reward"]
102
+
103
+ ConvertType = Literal["none", "embed", "classify", "reward"]
104
+
105
+ _RUNNER_TASKS: dict[RunnerType, list[TaskOption]] = {
106
+ "generate": ["generate", "transcription"],
107
+ "pooling": ["embedding", "embed", "classify", "score", "reward"],
108
+ "draft": ["draft"],
109
+ }
110
+
111
+ _RUNNER_CONVERTS: dict[RunnerType, list[ConvertType]] = {
112
+ "generate": [],
113
+ "pooling": ["embed", "classify", "reward"],
114
+ "draft": [],
115
+ }
116
+
117
+ # Some model suffixes are based on auto classes from Transformers:
118
+ # https://huggingface.co/docs/transformers/en/model_doc/auto
119
+ # NOTE: Items higher on this list take priority over lower ones
120
+ _SUFFIX_TO_DEFAULTS: list[tuple[str, tuple[RunnerType, ConvertType]]] = [
121
+ ("ForCausalLM", ("generate", "none")),
122
+ ("ForConditionalGeneration", ("generate", "none")),
123
+ ("ChatModel", ("generate", "none")),
124
+ ("LMHeadModel", ("generate", "none")),
125
+ ("ForTextEncoding", ("pooling", "embed")),
126
+ ("EmbeddingModel", ("pooling", "embed")),
127
+ ("ForSequenceClassification", ("pooling", "classify")),
128
+ ("ForAudioClassification", ("pooling", "classify")),
129
+ ("ForImageClassification", ("pooling", "classify")),
130
+ ("ForVideoClassification", ("pooling", "classify")),
131
+ ("ClassificationModel", ("pooling", "classify")),
132
+ ("ForRewardModeling", ("pooling", "reward")),
133
+ ("RewardModel", ("pooling", "reward")),
134
+ # Let other `*Model`s take priority
135
+ ("Model", ("pooling", "embed")),
136
+ ]
137
+
138
+
139
+ def iter_architecture_defaults():
140
+ yield from _SUFFIX_TO_DEFAULTS
141
+
142
+
143
+ def try_match_architecture_defaults(
144
+ architecture: str,
145
+ *,
146
+ runner_type: Optional[RunnerType] = None,
147
+ convert_type: Optional[ConvertType] = None,
148
+ ) -> Optional[tuple[str, tuple[RunnerType, ConvertType]]]:
149
+ for suffix, (default_runner_type,
150
+ default_convert_type) in iter_architecture_defaults():
151
+ if ((runner_type is None or runner_type == default_runner_type) and
152
+ (convert_type is None or convert_type == default_convert_type)
153
+ and architecture.endswith(suffix)):
154
+ return suffix, (default_runner_type, default_convert_type)
155
+
156
+ return None
157
+
158
+
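As a minimal illustration of how the suffix defaults above resolve in practice, the following self-contained sketch (not part of the diff; the trimmed table and demo architecture names are assumptions for demonstration only) re-implements the suffix lookup:

# Minimal sketch of the suffix-to-defaults lookup (illustrative only).
_DEMO_SUFFIX_TO_DEFAULTS = [
    ("ForCausalLM", ("generate", "none")),
    ("ForSequenceClassification", ("pooling", "classify")),
    ("Model", ("pooling", "embed")),  # catch-all, checked last
]

def demo_match(architecture: str):
    # Return the first (runner, convert) pair whose suffix matches.
    for suffix, defaults in _DEMO_SUFFIX_TO_DEFAULTS:
        if architecture.endswith(suffix):
            return suffix, defaults
    return None

print(demo_match("LlamaForCausalLM"))  # ('ForCausalLM', ('generate', 'none'))
print(demo_match("BertModel"))         # ('Model', ('pooling', 'embed'))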
159
+ @runtime_checkable
160
+ class SupportsHash(Protocol):
161
+
162
+ def compute_hash(self) -> str:
163
+ ...
164
+
165
+
166
+ class SupportsMetricsInfo(Protocol):
167
+
168
+ def metrics_info(self) -> dict[str, str]:
169
+ ...
170
+
171
+
172
+ class ModelImpl(str, enum.Enum):
173
+ AUTO = "auto"
174
+ VLLM = "vllm"
175
+ TRANSFORMERS = "transformers"
176
+ TERRATORCH = "terratorch"
177
+
178
+
179
+ def get_attr_docs(cls: type[Any]) -> dict[str, str]:
180
+ """
181
+ Get any docstrings placed after attribute assignments in a class body.
182
+
183
+ https://davidism.com/mit-license/
184
+ """
185
+
186
+ def pairwise(iterable):
187
+ """
188
+ Manually implement https://docs.python.org/3/library/itertools.html#itertools.pairwise
189
+
190
+ Can be removed when Python 3.9 support is dropped.
191
+ """
192
+ iterator = iter(iterable)
193
+ a = next(iterator, None)
194
+
195
+ for b in iterator:
196
+ yield a, b
197
+ a = b
198
+
199
+ try:
200
+ cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
201
+ except (OSError, KeyError, TypeError):
202
+ # HACK: Python 3.13+ workaround - set missing __firstlineno__
203
+ # Workaround can be removed after we upgrade to pydantic==2.12.0
204
+ with open(inspect.getfile(cls)) as f:
205
+ for i, line in enumerate(f):
206
+ if f"class {cls.__name__}" in line and ":" in line:
207
+ cls.__firstlineno__ = i + 1
208
+ break
209
+ cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
210
+
211
+ if not isinstance(cls_node, ast.ClassDef):
212
+ raise TypeError("Given object was not a class.")
213
+
214
+ out = {}
215
+
216
+ # Consider each pair of nodes.
217
+ for a, b in pairwise(cls_node.body):
218
+ # Must be an assignment then a constant string.
219
+ if (not isinstance(a, (ast.Assign, ast.AnnAssign))
220
+ or not isinstance(b, ast.Expr)
221
+ or not isinstance(b.value, ast.Constant)
222
+ or not isinstance(b.value.value, str)):
223
+ continue
224
+
225
+ doc = inspect.cleandoc(b.value.value)
226
+
227
+ # An assignment can have multiple targets (a = b = v), but an
228
+ # annotated assignment only has one target.
229
+ targets = a.targets if isinstance(a, ast.Assign) else [a.target]
230
+
231
+ for target in targets:
232
+ # Must be assigning to a plain name.
233
+ if not isinstance(target, ast.Name):
234
+ continue
235
+
236
+ out[target.id] = doc
237
+
238
+ return out
239
+
240
+
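For readers unfamiliar with the attribute-docstring pattern that `get_attr_docs` parses, here is a hedged, standalone sketch of the same idea (the `DemoConfig` class and `demo_attr_docs` helper are invented for illustration; run it as a script so `inspect.getsource` can find the class source, and it assumes Python 3.10+ for `itertools.pairwise`):

import ast
import inspect
import textwrap
from itertools import pairwise  # Python 3.10+

class DemoConfig:
    rate: int = 10
    """Requests per second."""
    name = "demo"
    """Human-readable name."""

def demo_attr_docs(cls):
    # Parse the class source and pair assignments with trailing string literals.
    tree = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
    docs = {}
    for a, b in pairwise(tree.body):
        if (isinstance(a, (ast.Assign, ast.AnnAssign))
                and isinstance(b, ast.Expr)
                and isinstance(b.value, ast.Constant)
                and isinstance(b.value.value, str)):
            target = a.targets[0] if isinstance(a, ast.Assign) else a.target
            if isinstance(target, ast.Name):
                docs[target.id] = inspect.cleandoc(b.value.value)
    return docs

print(demo_attr_docs(DemoConfig))
# {'rate': 'Requests per second.', 'name': 'Human-readable name.'}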
241
+ def get_field(cls: ConfigType, name: str) -> Field:
242
+ """Get the default factory field of a dataclass by name. Used for getting
243
+ default factory fields in `EngineArgs`."""
244
+ if not is_dataclass(cls):
245
+ raise TypeError("The given class is not a dataclass.")
246
+ cls_fields = {f.name: f for f in fields(cls)}
247
+ if name not in cls_fields:
248
+ raise ValueError(f"Field '{name}' not found in {cls.__name__}.")
249
+ named_field: Field = cls_fields[name]
250
+ if (default_factory := named_field.default_factory) is not MISSING:
251
+ return field(default_factory=default_factory)
252
+ if (default := named_field.default) is not MISSING:
253
+ return field(default=default)
254
+ raise ValueError(
255
+ f"{cls.__name__}.{name} must have a default value or default factory.")
256
+
257
+
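The distinction `get_field` draws between `default` and `default_factory` matters because mutable defaults must be re-created per instance. A small self-contained sketch of the same pattern (the `DemoArgs` class and `copy_default` helper are hypothetical):

from dataclasses import MISSING, dataclass, field, fields

@dataclass
class DemoArgs:
    limits: dict = field(default_factory=dict)  # mutable: needs a factory
    retries: int = 3                            # immutable: plain default

def copy_default(cls, name):
    # Mirror the get_field logic: prefer the factory, else the plain default.
    f = {fld.name: fld for fld in fields(cls)}[name]
    if f.default_factory is not MISSING:
        return field(default_factory=f.default_factory)
    if f.default is not MISSING:
        return field(default=f.default)
    raise ValueError(f"{cls.__name__}.{name} has no default")

print(copy_default(DemoArgs, "limits").default_factory)  # <class 'dict'>
print(copy_default(DemoArgs, "retries").default)         # 3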
258
+ def is_init_field(cls: ConfigType, name: str) -> bool:
259
+ return next(f for f in fields(cls) if f.name == name).init
260
+
261
+
262
+ TokenizerMode = Literal["auto", "slow", "mistral", "custom"]
263
+ ModelDType = Literal["auto", "half", "float16", "bfloat16", "float", "float32"]
264
+ MMEncoderTPMode = Literal["weights", "data"]
265
+
266
+
267
+ class LogprobsMode(enum.Enum):
268
+ RAW_LOGITS = "raw_logits"
269
+ RAW_LOGPROBS = "raw_logprobs"
270
+ PROCESSED_LOGITS = "processed_logits"
271
+ PROCESSED_LOGPROBS = "processed_logprobs"
272
+
273
+
274
+ @config
275
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
276
+ class ModelConfig:
277
+ """Configuration for the model."""
278
+
279
+ model: str = "Qwen/Qwen3-0.6B"
280
+ """Name or path of the Hugging Face model to use. It is also used as the
281
+ content for `model_name` tag in metrics output when `served_model_name` is
282
+ not specified."""
283
+ runner: RunnerOption = "auto"
284
+ """The type of model runner to use. Each vLLM instance only supports one
285
+ model runner, even if the same model can be used for multiple types."""
286
+ convert: ConvertOption = "auto"
287
+ """Convert the model using adapters defined in
288
+ [vllm.model_executor.models.adapters][]. The most common use case is to
289
+ adapt a text generation model to be used for pooling tasks."""
290
+ task: Optional[TaskOption] = None
291
+ """[DEPRECATED] The task to use the model for. If the model supports more
292
+ than one model runner, this is used to select which model runner to run.
293
+
294
+ Note that the model may support other tasks using the same model runner.
295
+ """
296
+ tokenizer: SkipValidation[str] = None # type: ignore
297
+ """Name or path of the Hugging Face tokenizer to use. If unspecified, model
298
+ name or path will be used."""
299
+ tokenizer_mode: TokenizerMode = "auto"
300
+ """Tokenizer mode:\n
301
+ - "auto" will use the fast tokenizer if available.\n
302
+ - "slow" will always use the slow tokenizer.\n
303
+ - "mistral" will always use the tokenizer from `mistral_common`.\n
304
+ - "custom" will use --tokenizer to select the preregistered tokenizer."""
305
+ trust_remote_code: bool = False
306
+ """Trust remote code (e.g., from HuggingFace) when downloading the model
307
+ and tokenizer."""
308
+ dtype: Union[ModelDType, torch.dtype] = "auto"
309
+ """Data type for model weights and activations:\n
310
+ - "auto" will use FP16 precision for FP32 and FP16 models, and BF16
311
+ precision for BF16 models.\n
312
+ - "half" for FP16. Recommended for AWQ quantization.\n
313
+ - "float16" is the same as "half".\n
314
+ - "bfloat16" for a balance between precision and range.\n
315
+ - "float" is shorthand for FP32 precision.\n
316
+ - "float32" for FP32 precision."""
317
+ seed: Optional[int] = None
318
+ """Random seed for reproducibility. Initialized to None in V0, but
319
+ initialized to 0 in V1."""
320
+ hf_config_path: Optional[str] = None
321
+ """Name or path of the Hugging Face config to use. If unspecified, model
322
+ name or path will be used."""
323
+ allowed_local_media_path: str = ""
324
+ """Allowing API requests to read local images or videos from directories
325
+ specified by the server file system. This is a security risk. Should only
326
+ be enabled in trusted environments."""
327
+ revision: Optional[str] = None
328
+ """The specific model version to use. It can be a branch name, a tag name,
329
+ or a commit id. If unspecified, will use the default version."""
330
+ code_revision: Optional[str] = None
331
+ """The specific revision to use for the model code on the Hugging Face Hub.
332
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
333
+ use the default version."""
334
+ rope_scaling: dict[str, Any] = field(default_factory=dict)
335
+ """RoPE scaling configuration. For example,
336
+ `{"rope_type":"dynamic","factor":2.0}`."""
337
+ rope_theta: Optional[float] = None
338
+ """RoPE theta. Use with `rope_scaling`. In some cases, changing the RoPE
339
+ theta improves the performance of the scaled model."""
340
+ tokenizer_revision: Optional[str] = None
341
+ """The specific revision to use for the tokenizer on the Hugging Face Hub.
342
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
343
+ use the default version."""
344
+ max_model_len: SkipValidation[int] = None # type: ignore
345
+ """Model context length (prompt and output). If unspecified, will be
346
+ automatically derived from the model config.
347
+
348
+ When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
349
+ format. Examples:\n
350
+ - 1k -> 1000\n
351
+ - 1K -> 1024\n
352
+ - 25.6k -> 25,600"""
353
+ spec_target_max_model_len: Optional[int] = None
354
+ """Specify the maximum length for spec decoding draft models."""
355
+ quantization: SkipValidation[Optional[QuantizationMethods]] = None
356
+ """Method used to quantize the weights. If `None`, we first check the
357
+ `quantization_config` attribute in the model config file. If that is
358
+ `None`, we assume the model weights are not quantized and use `dtype` to
359
+ determine the data type of the weights."""
360
+ enforce_eager: bool = False
361
+ """Whether to always use eager-mode PyTorch. If True, we will disable CUDA
362
+ graph and always execute the model in eager mode. If False, we will use
363
+ CUDA graph and eager execution in hybrid for maximal performance and
364
+ flexibility."""
365
+ max_seq_len_to_capture: int = 8192
366
+ """Maximum sequence len covered by CUDA graphs. When a sequence has context
367
+ length larger than this, we fall back to eager mode. Additionally for
368
+ encoder-decoder models, if the sequence length of the encoder input is
369
+ larger than this, we fall back to the eager mode."""
370
+ max_logprobs: int = 20
371
+ """Maximum number of log probabilities to return when `logprobs` is
372
+ specified in `SamplingParams`. The default value comes from the default for the
373
+ OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length *
374
+ vocab_size) logprobs are allowed to be returned and it may cause OOM."""
375
+ logprobs_mode: LogprobsMode = LogprobsMode.RAW_LOGPROBS
376
+ """Indicates the content returned in the logprobs and prompt_logprobs.
377
+ Supported mode:
378
+ 1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits.
379
+ Raw means the values before applying any logit processors, like bad words.
380
+ Processed means the values after applying all processors, including
381
+ temperature and top_k/top_p.
382
+ """
383
+ disable_sliding_window: bool = False
384
+ """Whether to disable sliding window. If True, we will disable the sliding
385
+ window functionality of the model, capping the max length to the sliding window size. If the
386
+ model does not support sliding window, this argument is ignored."""
387
+ disable_cascade_attn: bool = False
388
+ """Disable cascade attention for V1. While cascade attention does not
389
+ change the mathematical correctness, disabling it could be useful for
390
+ preventing potential numerical issues. Note that even if this is set to
391
+ False, cascade attention will only be used when the heuristic indicates that
392
+ it's beneficial."""
393
+ skip_tokenizer_init: bool = False
394
+ """Skip initialization of tokenizer and detokenizer. Expects valid
395
+ `prompt_token_ids` and `None` for prompt from the input. The generated
396
+ output will contain token ids."""
397
+ enable_prompt_embeds: bool = False
398
+ """If `True`, enables passing text embeddings as inputs via the
399
+ `prompt_embeds` key. Note that enabling this will double the time required
400
+ for graph compilation."""
401
+ served_model_name: Optional[Union[str, list[str]]] = None
402
+ """The model name(s) used in the API. If multiple names are provided, the
403
+ server will respond to any of the provided names. The model name in the
404
+ model field of a response will be the first name in this list. If not
405
+ specified, the model name will be the same as the `--model` argument. Note
406
+ that this name(s) will also be used in the `model_name` tag content of
407
+ Prometheus metrics; if multiple names are provided, the metrics tag will
408
+ take the first one."""
409
+ limit_mm_per_prompt: dict[str, int] = field(default_factory=dict)
410
+ """Maximum number of data items per modality per prompt. Only applicable
411
+ for multimodal models."""
412
+ interleave_mm_strings: bool = False
413
+ """Enable fully interleaved support for multimodal prompts, while using
414
+ --chat-template-content-format=string. Defaults to False."""
415
+ skip_mm_profiling: bool = False
416
+ """When enabled, skips multimodal memory profiling and only profiles with
417
+ language backbone model during engine initialization.
418
+ """
419
+ media_io_kwargs: dict[str, dict[str, Any]] = field(default_factory=dict)
420
+ """Additional args passed to process media inputs, keyed by modalities.
421
+ For example, to set num_frames for video, set
422
+ `--media-io-kwargs '{"video": {"num_frames": 40} }'` """
423
+ use_async_output_proc: bool = True
424
+ """Whether to use async output processor."""
425
+ config_format: Union[str, ConfigFormat] = "auto"
426
+ """The format of the model config to load:\n
427
+ - "auto" will try to load the config in hf format if available else it
428
+ will try to load in mistral format.\n
429
+ - "hf" will load the config in hf format.\n
430
+ - "mistral" will load the config in mistral format."""
431
+ hf_token: Optional[Union[bool, str]] = None
432
+ """The token to use as HTTP bearer authorization for remote files . If
433
+ `True`, will use the token generated when running `huggingface-cli login`
434
+ (stored in `~/.huggingface`)."""
435
+ hf_overrides: HfOverrides = field(default_factory=dict)
436
+ """If a dictionary, contains arguments to be forwarded to the Hugging Face
437
+ config. If a callable, it is called to update the HuggingFace config."""
438
+ mm_processor_kwargs: Optional[dict[str, Any]] = None
439
+ """Arguments to be forwarded to the model's processor for multi-modal data,
440
+ e.g., image processor. Overrides for the multi-modal processor obtained
441
+ from `AutoProcessor.from_pretrained`. The available overrides depend on the
442
+ model that is being run. For example, for Phi-3-Vision: `{"num_crops": 4}`.
443
+ """
444
+ mm_processor_cache_gb: float = 4
445
+ """The size (in GiB) of the multi-modal processor cache, which is used to
446
+ avoid re-processing past multi-modal inputs.
447
+
448
+ This cache is duplicated for each API process and engine core process,
449
+ resulting in a total memory usage of
450
+ `mm_processor_cache_gb * (api_server_count + data_parallel_size)`.
451
+
452
+ Set to `0` to disable this cache completely (not recommended)."""
453
+ mm_encoder_tp_mode: MMEncoderTPMode = "weights"
454
+ """Indicates how to optimize multi-modal encoder inference using
455
+ tensor parallelism (TP).
456
+
457
+ - `"weights"`: Within the same vLLM engine, split the weights of
458
+ each layer across TP ranks. (default TP behavior)
459
+ - `"data"`: Within the same vLLM engine, split the batched input data
460
+ across TP ranks to process the data in parallel, while hosting
461
+ the full weights on each TP rank.
462
+ This batch-level DP is not to be confused with API request-level
463
+ DP (which is controlled by `--data-parallel-size`).
464
+ This is only supported on a per-model basis and falls back to
465
+ `"weights"` if the encoder does not support DP."""
466
+ pooler_config: Optional["PoolerConfig"] = field(init=False)
467
+ """Pooler config which controls the behaviour of output pooling in pooling
468
+ models."""
469
+ override_pooler_config: Optional[Union[dict, "PoolerConfig"]] = None
470
+ """Initialize non-default pooling config or override default pooling config
471
+ for the pooling model. e.g. `{"pooling_type": "mean", "normalize": false}`.
472
+ """
473
+ logits_processor_pattern: Optional[str] = None
474
+ """Optional regex pattern specifying valid logits processor qualified names
475
+ that can be passed with the `logits_processors` extra completion argument.
476
+ Defaults to `None`, which allows no processors."""
477
+ generation_config: str = "auto"
478
+ """The folder path to the generation config. Defaults to `"auto"`, the
479
+ generation config will be loaded from model path. If set to `"vllm"`, no
480
+ generation config is loaded, vLLM defaults will be used. If set to a folder
481
+ path, the generation config will be loaded from the specified folder path.
482
+ If `max_new_tokens` is specified in generation config, then it sets a
483
+ server-wide limit on the number of output tokens for all requests."""
484
+ override_generation_config: dict[str, Any] = field(default_factory=dict)
485
+ """Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
486
+ used with `--generation-config auto`, the override parameters will be
487
+ merged with the default config from the model. If used with
488
+ `--generation-config vllm`, only the override parameters are used."""
489
+ enable_sleep_mode: bool = False
490
+ """Enable sleep mode for the engine (only cuda platform is supported)."""
491
+ model_impl: Union[str, ModelImpl] = ModelImpl.AUTO.value
492
+ """Which implementation of the model to use:\n
493
+ - "auto" will try to use the vLLM implementation, if it exists, and fall
494
+ back to the Transformers implementation if no vLLM implementation is
495
+ available.\n
496
+ - "vllm" will use the vLLM model implementation.\n
497
+ - "transformers" will use the Transformers model implementation.\n
498
+ - "terratorch" will use the TerraTorch model implementation.
499
+ """
500
+ override_attention_dtype: Optional[str] = None
501
+ """Override dtype for attention"""
502
+ logits_processors: Optional[list[Union[str, type[LogitsProcessor]]]] = None
503
+ """One or more logits processors' fully-qualified class names or class
504
+ definitions"""
505
+ io_processor_plugin: Optional[str] = None
506
+ """IOProcessor plugin name to load at model startup"""
507
+
508
+ def compute_hash(self) -> str:
509
+ """
510
+ WARNING: Whenever a new field is added to this config,
511
+ ensure that it is included in the factors list if
512
+ it affects the computation graph.
513
+
514
+ Provide a hash that uniquely identifies all the configs
515
+ that affect the structure of the computation
516
+ graph from input ids/embeddings to the final hidden states,
517
+ excluding anything before input ids/embeddings and after
518
+ the final hidden states.
519
+ """
520
+ factors: list[Any] = []
521
+ factors.append(self.model)
522
+ factors.append(self.dtype)
523
+ factors.append(self.quantization)
524
+ factors.append(self.revision)
525
+ factors.append(self.code_revision)
526
+ factors.append(self.max_model_len)
527
+ factors.append(self.max_logprobs)
528
+ factors.append(self.disable_sliding_window)
529
+ factors.append(self.trust_remote_code)
530
+ factors.append(self.generation_config)
531
+ factors.append(self.model_impl)
532
+ factors.append(self.override_generation_config)
533
+ factors.append(self.rope_scaling)
534
+ factors.append(self.rope_theta)
535
+ # hf_config can control how the model looks!
536
+ factors.append(self.hf_config.to_json_string())
537
+ str_factors = str(factors)
538
+ assert_hashable(str_factors)
539
+ return hashlib.sha256(str(factors).encode()).hexdigest()
540
+
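The hashing scheme above simply stringifies the list of graph-affecting factors and feeds it to SHA-256, so changing any one factor changes the digest and invalidates caches keyed on it. A minimal standalone illustration (the factor values are made up):

import hashlib

def config_hash(factors):
    # Same idea as compute_hash: hash the stringified factor list.
    return hashlib.sha256(str(factors).encode()).hexdigest()

base = ["meta-llama/Llama-3.1-8B", "bfloat16", None, 8192]
print(config_hash(base)[:16])
# Changing any factor (e.g. the dtype) yields a different digest:
print(config_hash(["meta-llama/Llama-3.1-8B", "float16", None, 8192])[:16])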
541
+ def __post_init__(self) -> None:
542
+ # Set the default seed to 0 in V1.
543
+ # NOTE(woosuk): In V0, we set the default seed to None because the
544
+ # driver worker shares the same process as the user process, and thus
545
+ # setting a seed affects the user process as well.
546
+ # In V1, we use separate processes for workers (unless
547
+ # VLLM_ENABLE_V1_MULTIPROCESSING=0), so setting a seed here
548
+ # doesn't affect the user process. However, without a consistent seed,
549
+ # different tensor parallel workers would sample different tokens,
550
+ # leading to inconsistent results.
551
+ if envs.VLLM_USE_V1 and self.seed is None:
552
+ self.seed = 0
553
+ if not envs.VLLM_ENABLE_V1_MULTIPROCESSING:
554
+ logger.warning(
555
+ "The global random seed is set to %d. Since "
556
+ "VLLM_ENABLE_V1_MULTIPROCESSING is set to False, this may "
557
+ "affect the random state of the Python process that "
558
+ "launched vLLM.", self.seed)
559
+
560
+ # NOTE: served_model_name must be set before maybe_model_redirect(self.model)
561
+ self.served_model_name = get_served_model_name(self.model,
562
+ self.served_model_name)
563
+ self.model = maybe_model_redirect(self.model)
564
+ # The tokenizer is consistent with the model by default.
565
+ if self.tokenizer is None:
566
+ self.tokenizer = self.model
567
+ if self.tokenizer_revision is None:
568
+ self.tokenizer_revision = self.revision
569
+ self.tokenizer = maybe_model_redirect(self.tokenizer)
570
+
571
+ if isinstance(self.hf_config_path, str):
572
+ self.hf_config_path = maybe_model_redirect(self.hf_config_path)
573
+
574
+ if callable(self.hf_overrides):
575
+ hf_overrides_kw = {}
576
+ hf_overrides_fn = self.hf_overrides
577
+ else:
578
+ hf_overrides_kw = self.hf_overrides
579
+ hf_overrides_fn = None
580
+
581
+ if self.rope_scaling:
582
+ hf_override: dict[str, Any] = {"rope_scaling": self.rope_scaling}
583
+ hf_overrides_kw.update(hf_override)
584
+ hf_overrides_str = json.dumps(hf_overrides_kw)
585
+ msg = (
586
+ "`--rope-scaling` will be removed in a future release. "
587
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
588
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
589
+ if self.rope_theta is not None:
590
+ hf_override = {"rope_theta": self.rope_theta}
591
+ hf_overrides_kw.update(hf_override)
592
+ hf_overrides_str = json.dumps(hf_overrides_kw)
593
+ msg = (
594
+ "`--rope-theta` will be removed in a future release. "
595
+ f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
596
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
597
+
598
+ self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)
599
+
600
+ if self.runner != "draft":
601
+ # If we're not running the draft model, check for speculators config
602
+ # If a speculators config is found, set model / tokenizer to the target model
603
+ self.model, self.tokenizer = maybe_override_with_speculators_target_model( # noqa: E501
604
+ model=self.model,
605
+ tokenizer=self.tokenizer,
606
+ revision=self.revision,
607
+ trust_remote_code=self.trust_remote_code)
608
+
609
+ if (backend := envs.VLLM_ATTENTION_BACKEND
610
+ ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
611
+ raise ValueError(
612
+ "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
613
+ "module was not found. See "
614
+ "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501
615
+ "for instructions on how to install it.")
616
+
617
+ from vllm.platforms import current_platform
618
+
619
+ if (self.override_attention_dtype is not None
620
+ and not current_platform.is_rocm()):
621
+ warnings.warn(
622
+ "override-attention-dtype is set but not using ROCm platform",
623
+ stacklevel=2)
624
+
625
+ if (self.enable_sleep_mode
626
+ and not current_platform.is_sleep_mode_available()):
627
+ raise ValueError(
628
+ "Sleep mode is not supported on current platform.")
629
+
630
+ hf_config = get_config(self.hf_config_path or self.model,
631
+ self.trust_remote_code,
632
+ self.revision,
633
+ self.code_revision,
634
+ self.config_format,
635
+ hf_overrides_kw=hf_overrides_kw,
636
+ hf_overrides_fn=hf_overrides_fn)
637
+
638
+ self.hf_config = hf_config
639
+ self.hf_text_config = get_hf_text_config(self.hf_config)
640
+ self.attention_chunk_size = getattr(self.hf_text_config,
641
+ "attention_chunk_size", None)
642
+ self.encoder_config = self._get_encoder_config()
643
+ self.hf_image_processor_config = get_hf_image_processor_config(
644
+ self.model, hf_token=self.hf_token, revision=self.revision)
645
+
646
+ architectures = self.architectures
647
+ registry = self.registry
648
+ is_generative_model = registry.is_text_generation_model(
649
+ architectures, self)
650
+ is_pooling_model = registry.is_pooling_model(architectures, self)
651
+
652
+ def _task_to_convert(task: TaskOption) -> ConvertType:
653
+ if task == "embedding" or task == "embed":
654
+ return "embed"
655
+ if task == "classify":
656
+ return "classify"
657
+ if task == "reward":
658
+ return "reward"
659
+ if task == "score":
660
+ new_task = self._get_default_pooling_task(architectures)
661
+ return "classify" if new_task == "classify" else "embed"
662
+
663
+ return "none"
664
+
665
+ if self.task is not None:
666
+ runner: RunnerOption = "auto"
667
+ convert: ConvertOption = "auto"
668
+ msg_prefix = ("The 'task' option has been deprecated and will be "
669
+ "removed in v0.13.0 or v1.0, whichever comes first.")
670
+ msg_hint = "Please remove this option."
671
+
672
+ is_generative_task = self.task in _RUNNER_TASKS["generate"]
673
+ is_pooling_task = self.task in _RUNNER_TASKS["pooling"]
674
+
675
+ if is_generative_model and is_pooling_model:
676
+ if is_generative_task:
677
+ runner = "generate"
678
+ convert = "auto"
679
+ msg_hint = ("Please replace this option with `--runner "
680
+ "generate` to continue using this model "
681
+ "as a generative model.")
682
+ elif is_pooling_task:
683
+ runner = "pooling"
684
+ convert = "auto"
685
+ msg_hint = ("Please replace this option with `--runner "
686
+ "pooling` to continue using this model "
687
+ "as a pooling model.")
688
+ else: # task == "auto"
689
+ pass
690
+ elif is_generative_model or is_pooling_model:
691
+ if is_generative_task:
692
+ runner = "generate"
693
+ convert = "auto"
694
+ msg_hint = "Please remove this option"
695
+ elif is_pooling_task:
696
+ runner = "pooling"
697
+ convert = _task_to_convert(self.task)
698
+ msg_hint = ("Please replace this option with `--convert "
699
+ f"{convert}` to continue using this model "
700
+ "as a pooling model.")
701
+ else: # task == "auto"
702
+ pass
703
+ else:
704
+ raise AssertionError("The model should be a generative or "
705
+ "pooling model when task is set to "
706
+ f"{self.task!r}.")
707
+
708
+ self.runner = runner
709
+ self.convert = convert
710
+
711
+ msg = f"{msg_prefix} {msg_hint}"
712
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
713
+
714
+ self.runner_type = self._get_runner_type(architectures, self.runner)
715
+ self.convert_type = self._get_convert_type(architectures,
716
+ self.runner_type,
717
+ self.convert)
718
+
719
+ if self.runner_type == "generate" and not is_generative_model:
720
+ generate_converts = _RUNNER_CONVERTS["generate"]
721
+ if self.convert_type not in generate_converts:
722
+ # Currently we don't have any converters for generative models
723
+ raise ValueError(
724
+ "This model does not support `--runner generate`.")
725
+ if self.runner_type == "pooling" and not is_pooling_model:
726
+ pooling_converts = _RUNNER_CONVERTS["pooling"]
727
+ if self.convert_type not in pooling_converts:
728
+ convert_option = "<" + "|".join(pooling_converts) + ">"
729
+ raise ValueError(
730
+ "This model does not support `--runner pooling`. "
731
+ f"You can pass `--convert {convert_option} to adapt "
732
+ "it into a pooling model.")
733
+
734
+ self.supported_tasks = self._get_supported_tasks(
735
+ architectures, self.runner_type, self.convert_type)
736
+
737
+ # Note: Initialize these attributes early because transformers fallback
738
+ # may fail to load dynamic modules in child processes
739
+ model_info, arch = registry.inspect_model_cls(architectures, self)
740
+ self._model_info = model_info
741
+ self._architecture = arch
742
+ logger.info("Resolved architecture: %s", arch)
743
+
744
+ self.pooler_config = self._init_pooler_config()
745
+
746
+ self.dtype: torch.dtype = _get_and_verify_dtype(
747
+ self.model,
748
+ self.hf_config,
749
+ self.dtype,
750
+ is_pooling_model=self.runner_type == "pooling",
751
+ revision=self.revision,
752
+ )
753
+
754
+ # Interleaved attention is not supported by some backends in V0
755
+ if (not self.disable_sliding_window
756
+ and is_interleaved(self.hf_text_config)
757
+ and not envs.VLLM_USE_V1
758
+ and (backend := envs.VLLM_ATTENTION_BACKEND)
759
+ in ("XFORMERS", "FLASHINFER")):
760
+ logger.warning_once(
761
+ "%s has interleaved attention, which is currently not "
762
+ "supported by the %s backend. Disabling sliding window and "
763
+ "capping the max length to the sliding window size (%d).",
764
+ self.hf_text_config.model_type,
765
+ backend,
766
+ self.hf_text_config.sliding_window,
767
+ )
768
+ self.disable_sliding_window = True
769
+
770
+ self.original_max_model_len = self.max_model_len
771
+ self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
772
+ self.multimodal_config = self._init_multimodal_config()
773
+
774
+ if self.disable_sliding_window:
775
+ # Set after get_and_verify_max_len to ensure that max_model_len
776
+ # can be correctly capped to sliding window size
777
+ self.hf_text_config.sliding_window = None
778
+
779
+ if not self.skip_tokenizer_init:
780
+ self._verify_tokenizer_mode()
781
+
782
+ # Avoid running try_verify_and_update_config multiple times
783
+ self.config_updated = False
784
+
785
+ self._verify_quantization()
786
+ self._verify_cuda_graph()
787
+ self._verify_bnb_config()
788
+
789
+ @field_validator("quantization", mode="before")
790
+ @classmethod
791
+ def validate_quantization_before(cls, value: Any) -> Any:
792
+ if isinstance(value, str):
793
+ return value.lower()
794
+ return value
795
+
796
+ @model_validator(mode="after")
797
+ def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
798
+ if not isinstance(self.tokenizer, str):
799
+ raise ValueError("tokenizer must be a string after __post_init__.")
800
+ if not isinstance(self.max_model_len, int):
801
+ raise ValueError(
802
+ "max_model_len must be an integer after __post_init__.")
803
+ return self
804
+
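The `field_validator(..., mode="before")` / `model_validator(mode="after")` pair above is standard pydantic v2 usage: the "before" hook normalizes raw input, while the "after" hook checks invariants once construction is done. A hedged, standalone sketch of the same normalization step (the `DemoConfig` model is invented and uses `BaseModel` rather than the pydantic dataclass form used here):

from typing import Optional

from pydantic import BaseModel, field_validator

class DemoConfig(BaseModel):
    quantization: Optional[str] = None

    @field_validator("quantization", mode="before")
    @classmethod
    def lowercase_quantization(cls, value):
        # Normalize raw string input before validation, as above.
        return value.lower() if isinstance(value, str) else value

print(DemoConfig(quantization="AWQ").quantization)  # 'awq'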
805
+ def _get_transformers_backend_cls(self) -> str:
806
+ """Determine which Transformers backend class will be used if
807
+ `model_impl` is set to `transformers` or `auto`."""
808
+ if getattr(self, "runner_type", self.runner) == "pooling":
809
+ return "TransformersModel"
810
+ if self.hf_config != self.hf_text_config:
811
+ # If 'hf_text_config' differs from 'hf_config', this is probably a
812
+ # composite config, i.e. a multimodal model
813
+ return "TransformersForMultimodalLM"
814
+ return "TransformersForCausalLM"
815
+
816
+ def using_transformers_backend(self) -> bool:
817
+ """Check if the model is using the Transformers backend class."""
818
+ return self.architecture == self._get_transformers_backend_cls()
819
+
820
+ @property
821
+ def registry(self):
822
+ return me_models.ModelRegistry
823
+
824
+ @property
825
+ def architectures(self) -> list[str]:
826
+ return getattr(self.hf_config, "architectures", [])
827
+
828
+ @property
829
+ def architecture(self) -> str:
830
+ """The architecture vllm actually used."""
831
+ return self._architecture
832
+
833
+ def maybe_pull_model_tokenizer_for_runai(self, model: str,
834
+ tokenizer: str) -> None:
835
+ """Pull model/tokenizer from Object Storage to temporary
836
+ directory when needed.
837
+
838
+ Args:
839
+ model: Model name or path
840
+ tokenizer: Tokenizer name or path
841
+ """
842
+ if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
843
+ return
844
+
845
+ if is_runai_obj_uri(model):
846
+ object_storage_model = ObjectStorageModel()
847
+ object_storage_model.pull_files(
848
+ model, allow_pattern=["*.model", "*.py", "*.json"])
849
+ self.model_weights = model
850
+ self.model = object_storage_model.dir
851
+
852
+ # If tokenizer is same as model, download to same directory
853
+ if model == tokenizer:
854
+ object_storage_model.pull_files(model,
855
+ ignore_pattern=[
856
+ "*.pt", "*.safetensors",
857
+ "*.bin", "*.tensors"
858
+ ])
859
+ self.tokenizer = object_storage_model.dir
860
+ return
861
+
862
+ # Only download tokenizer if needed and not already handled
863
+ if is_runai_obj_uri(tokenizer):
864
+ object_storage_tokenizer = ObjectStorageModel()
865
+ object_storage_tokenizer.pull_files(
866
+ model,
867
+ ignore_pattern=["*.pt", "*.safetensors", "*.bin", "*.tensors"])
868
+ self.tokenizer = object_storage_tokenizer.dir
869
+
870
+ def _init_multimodal_config(self) -> Optional["MultiModalConfig"]:
871
+ if self._model_info.supports_multimodal:
872
+ if (self.mm_encoder_tp_mode == "data" and
873
+ not self._model_info.supports_multimodal_encoder_tp_data):
874
+ logger.warning_once(
875
+ "This model does not support `--mm-encoder-tp-mode data`. "
876
+ "Falling back to `--mm-encoder-tp-mode weights`.")
877
+ self.mm_encoder_tp_mode = "weights"
878
+
879
+ return MultiModalConfig(
880
+ limit_per_prompt=self.limit_mm_per_prompt,
881
+ media_io_kwargs=self.media_io_kwargs,
882
+ mm_processor_kwargs=self.mm_processor_kwargs,
883
+ mm_processor_cache_gb=self.mm_processor_cache_gb,
884
+ mm_encoder_tp_mode=self.mm_encoder_tp_mode,
885
+ interleave_mm_strings=self.interleave_mm_strings,
886
+ skip_mm_profiling=self.skip_mm_profiling,
887
+ )
888
+
889
+ return None
890
+
891
+ def _get_encoder_config(self):
892
+ return get_sentence_transformer_tokenizer_config(
893
+ self.model, self.revision)
894
+
895
+ def _init_pooler_config(self) -> Optional["PoolerConfig"]:
896
+ if self.runner_type == "pooling":
897
+ if isinstance(self.override_pooler_config, dict):
898
+ self.override_pooler_config = PoolerConfig(
899
+ **self.override_pooler_config)
900
+
901
+ pooler_config = self.override_pooler_config or PoolerConfig()
902
+
903
+ base_config = get_pooling_config(self.model, self.revision)
904
+ if base_config is not None:
905
+ # Only set values that are not overridden by the user
906
+ for k, v in base_config.items():
907
+ if getattr(pooler_config, k) is None:
908
+ setattr(pooler_config, k, v)
909
+
910
+ default_pooling_type = self._model_info.default_pooling_type
911
+ if pooler_config.pooling_type is None:
912
+ pooler_config.pooling_type = default_pooling_type
913
+
914
+ return pooler_config
915
+
916
+ return None
917
+
918
+ def _verify_tokenizer_mode(self) -> None:
919
+ tokenizer_mode = cast(TokenizerMode, self.tokenizer_mode.lower())
920
+ if tokenizer_mode not in get_args(TokenizerMode):
921
+ raise ValueError(
922
+ f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
923
+ f"one of {get_args(TokenizerMode)}.")
924
+ self.tokenizer_mode = tokenizer_mode
925
+
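The validation above relies on `typing.get_args` to turn a `Literal` alias into the tuple of allowed strings, which keeps the error message in sync with the type. A tiny standalone sketch of that pattern (the `Mode` alias and `check_mode` helper are illustrative):

from typing import Literal, get_args

Mode = Literal["auto", "slow", "mistral", "custom"]

def check_mode(value: str) -> str:
    # Lower-case the input and verify it against the Literal's allowed values.
    mode = value.lower()
    if mode not in get_args(Mode):
        raise ValueError(f"Unknown mode: {value}. Must be one of {get_args(Mode)}.")
    return mode

print(check_mode("AUTO"))   # 'auto'
# check_mode("fast")        # would raise ValueError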
926
+ def _get_default_runner_type(
927
+ self,
928
+ architectures: list[str],
929
+ ) -> RunnerType:
930
+ registry = self.registry
931
+
932
+ # Some Sentence Transformers models use *ForCausalLM archs
933
+ if get_pooling_config(self.model, self.revision):
934
+ return "pooling"
935
+
936
+ for arch in architectures:
937
+ if arch in registry.get_supported_archs():
938
+ if registry.is_pooling_model(architectures, self):
939
+ return "pooling"
940
+ if registry.is_text_generation_model(architectures, self):
941
+ return "generate"
942
+
943
+ match = try_match_architecture_defaults(arch)
944
+ if match:
945
+ _, (runner_type, _) = match
946
+ return runner_type
947
+
948
+ return "generate"
949
+
950
+ def _get_runner_type(
951
+ self,
952
+ architectures: list[str],
953
+ runner: RunnerOption,
954
+ ) -> RunnerType:
955
+ if runner != "auto":
956
+ return runner
957
+
958
+ runner_type = self._get_default_runner_type(architectures)
959
+
960
+ # Don't log the most common case
961
+ if runner_type != "generate":
962
+ logger.info(
963
+ "Resolved `--runner auto` to `--runner %s`. "
964
+ "Pass the value explicitly to silence this message.",
965
+ runner_type)
966
+
967
+ return runner_type
968
+
969
+ def _get_default_convert_type(
970
+ self,
971
+ architectures: list[str],
972
+ runner_type: RunnerType,
973
+ ) -> ConvertType:
974
+ registry = self.registry
975
+
976
+ for arch in architectures:
977
+ if arch in registry.get_supported_archs():
978
+ if (runner_type == "generate"
979
+ and registry.is_text_generation_model(
980
+ architectures, self)):
981
+ return "none"
982
+ if (runner_type == "pooling"
983
+ and registry.is_pooling_model(architectures, self)):
984
+ return "none"
985
+
986
+ match = try_match_architecture_defaults(arch,
987
+ runner_type=runner_type)
988
+ if match:
989
+ _, (_, convert_type) = match
990
+ return convert_type
991
+
992
+ # This is to handle Sentence Transformers models that use *ForCausalLM
993
+ # and also multi-modal pooling models which are not defined as
994
+ # Sentence Transformers models
995
+ if runner_type == "pooling":
996
+ return "embed"
997
+
998
+ return "none"
999
+
1000
+ def _get_convert_type(
1001
+ self,
1002
+ architectures: list[str],
1003
+ runner_type: RunnerType,
1004
+ convert: ConvertOption,
1005
+ ) -> ConvertType:
1006
+ if convert != "auto":
1007
+ return convert
1008
+
1009
+ convert_type = self._get_default_convert_type(architectures,
1010
+ runner_type)
1011
+
1012
+ # Don't log the most common case
1013
+ if convert_type != "none":
1014
+ logger.info(
1015
+ "Resolved `--convert auto` to `--convert %s`. "
1016
+ "Pass the value explicitly to silence this message.",
1017
+ convert_type)
1018
+
1019
+ return convert_type
1020
+
1021
+ def _get_supported_generation_tasks(
1022
+ self,
1023
+ architectures: list[str],
1024
+ convert_type: ConvertType,
1025
+ ) -> list[_ResolvedTask]:
1026
+ registry = self.registry
1027
+
1028
+ if registry.is_transcription_only_model(architectures, self):
1029
+ return ["transcription"]
1030
+
1031
+ # TODO: Use get_supported_generation_tasks once V0 is removed
1032
+ supported_tasks = list[_ResolvedTask]()
1033
+ if (registry.is_text_generation_model(architectures, self)
1034
+ or convert_type in _RUNNER_CONVERTS["generate"]):
1035
+ supported_tasks.append("generate")
1036
+
1037
+ if registry.is_transcription_model(architectures, self):
1038
+ supported_tasks.append("transcription")
1039
+
1040
+ return supported_tasks
1041
+
1042
+ def _get_default_pooling_task(
1043
+ self,
1044
+ architectures: list[str],
1045
+ ) -> Literal["embed", "classify", "reward"]:
1046
+ if self.registry.is_cross_encoder_model(architectures, self):
1047
+ return "classify"
1048
+
1049
+ for arch in architectures:
1050
+ match = try_match_architecture_defaults(arch,
1051
+ runner_type="pooling")
1052
+ if match:
1053
+ _, (_, convert_type) = match
1054
+ assert convert_type != "none"
1055
+ return convert_type
1056
+
1057
+ return "embed"
1058
+
1059
+ def _get_supported_pooling_tasks(
1060
+ self,
1061
+ architectures: list[str],
1062
+ convert_type: ConvertType,
1063
+ ) -> list[_ResolvedTask]:
1064
+ registry = self.registry
1065
+
1066
+ # TODO: Use get_supported_pooling_tasks once V0 is removed
1067
+ supported_tasks = list[_ResolvedTask]()
1068
+ if (registry.is_pooling_model(architectures, self)
1069
+ or convert_type in _RUNNER_CONVERTS["pooling"]):
1070
+ supported_tasks.append("encode")
1071
+
1072
+ extra_task = (self._get_default_pooling_task(architectures)
1073
+ if convert_type == "none" else convert_type)
1074
+ supported_tasks.append(extra_task)
1075
+
1076
+ return supported_tasks
1077
+
1078
+ def _get_supported_tasks(
1079
+ self,
1080
+ architectures: list[str],
1081
+ runner_type: RunnerType,
1082
+ convert_type: ConvertType,
1083
+ ) -> list[_ResolvedTask]:
1084
+ if runner_type == "generate":
1085
+ return self._get_supported_generation_tasks(
1086
+ architectures, convert_type)
1087
+ if runner_type == "pooling":
1088
+ return self._get_supported_pooling_tasks(architectures,
1089
+ convert_type)
1090
+ if runner_type == "draft":
1091
+ return ["draft"]
1092
+
1093
+ assert_never(runner_type)
1094
+
1095
+ def _parse_quant_hf_config(self, hf_config: PretrainedConfig):
1096
+ quant_cfg = getattr(hf_config, "quantization_config", None)
1097
+ if quant_cfg is None:
1098
+ # compressed-tensors uses a "compression_config" key
1099
+ quant_cfg = getattr(hf_config, "compression_config", None)
1100
+
1101
+ else:
1102
+ # Set quant_method for ModelOpt models.
1103
+ producer_name = quant_cfg.get("producer", {}).get("name")
1104
+ if producer_name == "modelopt":
1105
+ quant_algo = quant_cfg.get("quantization",
1106
+ {}).get("quant_algo")
1107
+ if quant_algo == "FP8":
1108
+ quant_cfg["quant_method"] = "modelopt"
1109
+ elif quant_algo == "NVFP4":
1110
+ quant_cfg["quant_method"] = "modelopt_fp4"
1111
+ elif quant_algo is not None:
1112
+ raise ValueError(
1113
+ f"Unknown ModelOpt quant algo: {quant_algo}")
1114
+
1115
+ return quant_cfg
1116
+
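To make the ModelOpt normalization above concrete, here is a hedged, standalone sketch of the same mapping (the sample config dict is invented, not taken from any real checkpoint): FP8 maps to `modelopt`, NVFP4 to `modelopt_fp4`, and any other algorithm is rejected.

def normalize_modelopt(quant_cfg: dict) -> dict:
    # Mirror the quant_algo -> quant_method mapping shown above.
    if quant_cfg.get("producer", {}).get("name") == "modelopt":
        algo = quant_cfg.get("quantization", {}).get("quant_algo")
        if algo == "FP8":
            quant_cfg["quant_method"] = "modelopt"
        elif algo == "NVFP4":
            quant_cfg["quant_method"] = "modelopt_fp4"
        elif algo is not None:
            raise ValueError(f"Unknown ModelOpt quant algo: {algo}")
    return quant_cfg

sample = {"producer": {"name": "modelopt"}, "quantization": {"quant_algo": "FP8"}}
print(normalize_modelopt(sample)["quant_method"])  # modelopt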
1117
+ def _verify_quantization(self) -> None:
1118
+ supported_quantization = me_quant.QUANTIZATION_METHODS
1119
+ optimized_quantization_methods = [
1120
+ "fp8",
1121
+ "modelopt",
1122
+ "gptq_marlin_24",
1123
+ "gptq_marlin",
1124
+ "awq_marlin",
1125
+ "fbgemm_fp8",
1126
+ "compressed-tensors",
1127
+ "experts_int8",
1128
+ "quark",
1129
+ "modelopt_fp4",
1130
+ "bitblas",
1131
+ "gptq_bitblas",
1132
+ "inc",
1133
+ "petit_nvfp4",
1134
+ ]
1135
+ if self.quantization is not None:
1136
+ self.quantization = cast(me_quant.QuantizationMethods,
1137
+ self.quantization)
1138
+
1139
+ # Parse quantization method from the HF model config, if available.
1140
+ quant_cfg = self._parse_quant_hf_config(self.hf_config)
1141
+ if quant_cfg is None and (text_config := getattr(
1142
+ self.hf_config, "text_config", None)):
1143
+ # Check the text config as well for multi-modal models.
1144
+ quant_cfg = self._parse_quant_hf_config(text_config)
1145
+
1146
+ if quant_cfg is not None:
1147
+ # Use the community standard 'quant_method'
1148
+ quant_method = quant_cfg.get("quant_method", "").lower()
1149
+
1150
+ # Normalize library names
1151
+ quant_method = quant_method.replace("compressed_tensors",
1152
+ "compressed-tensors")
1153
+
1154
+ quant_cfg["quant_method"] = quant_method
1155
+
1156
+ # Quantization methods which are overrides (i.e. they have a
1157
+ # `override_quantization_method` method) must be checked in order
1158
+ # of preference (this is particularly important for GPTQ).
1159
+ overrides = [
1160
+ "bitblas",
1161
+ "gptq_marlin_24",
1162
+ "gptq_marlin",
1163
+ "gptq_bitblas",
1164
+ "awq_marlin",
1165
+ "ipex",
1166
+ "moe_wna16",
1167
+ "modelopt",
1168
+ "modelopt_fp4",
1169
+ "petit_nvfp4",
1170
+ ]
1171
+ quantization_methods = [
1172
+ q for q in supported_quantization if q not in overrides
1173
+ ]
1174
+ # Any custom overrides will be in quantization_methods so we place
1175
+ # them at the start of the list so custom overrides have preference
1176
+ # over the built-in ones.
1177
+ quantization_methods = quantization_methods + overrides
1178
+
1179
+ # Detect which checkpoint it is
1180
+ for name in quantization_methods:
1181
+ method = me_quant.get_quantization_config(name)
1182
+ quantization_override = method.override_quantization_method(
1183
+ quant_cfg, self.quantization)
1184
+ if quantization_override is not None:
1185
+ # Raise error if the override is not custom (custom would
1186
+ # be in QUANTIZATION_METHODS but not QuantizationMethods)
1187
+ # and hasn't been added to the overrides list.
1188
+ if (name in get_args(me_quant.QuantizationMethods)
1189
+ and name not in overrides):
1190
+ raise ValueError(
1191
+ f"Quantization method {name} is an override but "
1192
+ "is has not been added to the `overrides` list "
1193
+ "above. This is necessary to ensure that the "
1194
+ "overrides are checked in order of preference.")
1195
+ quant_method = quantization_override
1196
+ self.quantization = quantization_override
1197
+ break
1198
+
1199
+ # Verify quantization configurations.
1200
+ if self.quantization is None:
1201
+ self.quantization = quant_method
1202
+ elif self.quantization != quant_method:
1203
+ raise ValueError(
1204
+ "Quantization method specified in the model config "
1205
+ f"({quant_method}) does not match the quantization "
1206
+ f"method specified in the `quantization` argument "
1207
+ f"({self.quantization}).")
1208
+
1209
+ if self.quantization is not None:
1210
+ if self.quantization not in supported_quantization:
1211
+ raise ValueError(
1212
+ f"Unknown quantization method: {self.quantization}. Must "
1213
+ f"be one of {supported_quantization}.")
1214
+ from vllm.platforms import current_platform
1215
+ current_platform.verify_quantization(self.quantization)
1216
+ if self.quantization not in optimized_quantization_methods:
1217
+ logger.warning(
1218
+ "%s quantization is not fully "
1219
+ "optimized yet. The speed can be slower than "
1220
+ "non-quantized models.", self.quantization)
1221
+
1222
+ def _verify_cuda_graph(self) -> None:
1223
+ # The `max_seq_len_to_capture` was incorrectly
1224
+ # based on the encoder's input length (448)
1225
+ # but not the decoder's larger input length (1500).
1226
+ # This change ensures the CUDA Graph captures the correct,
1227
+ # larger sequence length, allowing it to work as intended.
1228
+ effective_max_seq_len = self.max_model_len
1229
+ if self.is_encoder_decoder:
1230
+ effective_max_seq_len = max(
1231
+ effective_max_seq_len,
1232
+ getattr(self.hf_config, "max_source_positions", 0))
1233
+ self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
1234
+ effective_max_seq_len)
1235
+ # CUDAGraph capture not supported for enc-dec models and mllama on ROCm
1236
+ ROCM_UNSUPPORTED_MODELS = ['mllama']
1237
+ unsupported_rocm = (self.hf_config.model_type
1238
+ in ROCM_UNSUPPORTED_MODELS
1239
+ or self.is_encoder_decoder)
1240
+
1241
+ if (unsupported_rocm and not self.enforce_eager
1242
+ and current_platform.is_rocm()):
1243
+ logger.warning(
1244
+ "CUDA graph is not supported for %s on ROCm yet, fallback "
1245
+ "to eager mode.", self.hf_config.model_type)
1246
+ self.enforce_eager = True
1247
+
1248
+ def _verify_bnb_config(self) -> None:
1249
+ """
1250
+ The current version of bitsandbytes (0.46.1) with 8-bit models does not
1251
+ yet support CUDA graph.
1252
+ # TODO: Remove this when bitsandbytes supports it.
1253
+ """
1254
+ is_bitsandbytes = self.quantization == "bitsandbytes"
1255
+ has_quantization_config = (getattr(self.hf_config,
1256
+ "quantization_config", None)
1257
+ is not None)
1258
+ is_8bit = (self.hf_config.quantization_config.get(
1259
+ "load_in_8bit", False) if has_quantization_config else False)
1260
+ if all([
1261
+ is_bitsandbytes,
1262
+ has_quantization_config,
1263
+ is_8bit,
1264
+ not self.enforce_eager,
1265
+ ]):
1266
+ logger.warning(
1267
+ "CUDA graph is not supported on BitsAndBytes 8bit yet, "
1268
+ "fallback to the eager mode.")
1269
+
1270
+ self.enforce_eager = True
1271
+
1272
+ def _verify_with_expert_parallelism(self) -> None:
1273
+ num_expert_names = [
1274
+ "moe_num_experts", # Dbrx
1275
+ "num_experts", # Jamba
1276
+ "n_routed_experts", # DeepSeek
1277
+ "num_local_experts", # Mixtral
1278
+ ]
1279
+ num_experts = 0
1280
+ for name in num_expert_names:
1281
+ num_experts = getattr(self.hf_text_config, name, 0)
1282
+ if num_experts > 0:
1283
+ break
1284
+ if num_experts < 1:
1285
+ raise ValueError(
1286
+ "Number of experts in the model must be greater than 0 "
1287
+ "when expert parallelism is enabled.")
1288
+
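The expert-parallelism check above just probes a few known config attribute names until one yields a positive expert count. A tiny standalone sketch (the `DemoTextConfig` class is invented; the attribute names mirror the list above):

class DemoTextConfig:
    n_routed_experts = 64  # DeepSeek-style attribute name

def count_experts(cfg):
    # Probe the known attribute names until one yields a positive count.
    for name in ("moe_num_experts", "num_experts", "n_routed_experts",
                 "num_local_experts"):
        n = getattr(cfg, name, 0)
        if n > 0:
            return n
    return 0

print(count_experts(DemoTextConfig()))  # 64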
1289
+ def verify_dual_chunk_attention_config(
1290
+ self,
1291
+ load_config: "LoadConfig",
1292
+ ) -> None:
1293
+ if hasattr(self.hf_config, "dual_chunk_attention_config"):
1294
+ # Try loading the sparse attention config
1295
+ from vllm.model_executor.model_loader.weight_utils import (
1296
+ get_sparse_attention_config)
1297
+ sparse_attn_config = get_sparse_attention_config(self, load_config)
1298
+ if sparse_attn_config:
1299
+ self.hf_config.dual_chunk_attention_config[
1300
+ "sparse_attention_config"] = sparse_attn_config
1301
+ if "sparse_attention_enabled" not in \
1302
+ self.hf_config.dual_chunk_attention_config:
1303
+ self.hf_config.dual_chunk_attention_config[
1304
+ "sparse_attention_enabled"] = True
1305
+
1306
+ if envs.VLLM_ATTENTION_BACKEND != STR_DUAL_CHUNK_FLASH_ATTN_VAL:
1307
+ raise ValueError("please set VLLM_ATTENTION_BACKEND to "
1308
+ f"{STR_DUAL_CHUNK_FLASH_ATTN_VAL}")
1309
+
1310
+ def verify_async_output_proc(self, parallel_config, speculative_config,
1311
+ device_config) -> None:
1312
+ if not self.use_async_output_proc:
1313
+ # Nothing to check
1314
+ return
1315
+
1316
+ if parallel_config.pipeline_parallel_size > 1:
1317
+ self.use_async_output_proc = False
1318
+ return
1319
+
1320
+ # Reminder: Please update docs/features/compatibility_matrix.md
1321
+ # If the feature combo becomes valid
1322
+ from vllm.platforms import current_platform
1323
+ if not current_platform.is_async_output_supported(self.enforce_eager):
1324
+ self.use_async_output_proc = False
1325
+ return
1326
+
1327
+ if envs.VLLM_USE_RAY_SPMD_WORKER:
1328
+ self.use_async_output_proc = False
1329
+ return
1330
+
1331
+ # Async postprocessor is not necessary for pooling models
1332
+ # since there is no token generation
1333
+ if self.runner_type == "pooling":
1334
+ self.use_async_output_proc = False
1335
+
1336
+ # Reminder: Please update docs/features/compatibility_matrix.md
1337
+ # If the feature combo becomes valid
1338
+ if speculative_config:
1339
+ self.use_async_output_proc = False
1340
+
1341
+ def verify_with_parallel_config(
1342
+ self,
1343
+ parallel_config: "ParallelConfig",
1344
+ ) -> None:
1345
+
1346
+ if parallel_config.distributed_executor_backend == "external_launcher":
1347
+ assert self.seed is not None, (
1348
+ "Seed must be set when using external launcher backend to "
1349
+ "make sure sampling results are the same across workers.")
1350
+
1351
+ total_num_attention_heads = getattr(self.hf_text_config,
1352
+ "num_attention_heads", 0)
1353
+ tensor_parallel_size = parallel_config.tensor_parallel_size
1354
+ if total_num_attention_heads % tensor_parallel_size != 0:
1355
+ raise ValueError(
1356
+ f"Total number of attention heads ({total_num_attention_heads})"
1357
+ " must be divisible by tensor parallel size "
1358
+ f"({tensor_parallel_size}).")
1359
+
1360
+ if parallel_config.enable_expert_parallel:
1361
+ self._verify_with_expert_parallelism()
1362
+
1363
+ pipeline_parallel_size = parallel_config.pipeline_parallel_size
1364
+ if pipeline_parallel_size > 1:
1365
+ if not self.registry.is_pp_supported_model(self.architectures,
1366
+ self):
1367
+ raise NotImplementedError(
1368
+ "Pipeline parallelism is not supported for this model. "
1369
+ "Supported models implement the `SupportsPP` interface.")
1370
+
1371
+ if self.use_async_output_proc:
1372
+ self.use_async_output_proc = False
1373
+
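The head-count check in `verify_with_parallel_config` is plain integer arithmetic: attention heads are sharded evenly across tensor-parallel ranks, so the total must divide cleanly. A quick worked example (head counts chosen for illustration):

def check_tp(num_attention_heads: int, tensor_parallel_size: int) -> int:
    # Heads are split evenly across TP ranks, so the count must divide cleanly.
    if num_attention_heads % tensor_parallel_size != 0:
        raise ValueError(
            f"Total number of attention heads ({num_attention_heads}) must be "
            f"divisible by tensor parallel size ({tensor_parallel_size}).")
    return num_attention_heads // tensor_parallel_size

print(check_tp(32, 4))   # 8 heads per rank
# check_tp(32, 3)        # would raise ValueError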
1374
+ def get_sliding_window(self) -> Optional[int]:
1375
+ """Get the sliding window size from the HF text config if present."""
1376
+ return getattr(self.hf_text_config, "sliding_window", None)
1377
+
1378
+ def get_vocab_size(self) -> int:
1379
+ return getattr(self.hf_text_config, "vocab_size", 0)
1380
+
1381
+ def get_hidden_size(self) -> int:
1382
+ return getattr(self.hf_text_config, "hidden_size", 0)
1383
+
1384
+ @property
1385
+ def is_deepseek_mla(self) -> bool:
1386
+ if not hasattr(self.hf_text_config, "model_type"):
1387
+ return False
1388
+ elif self.hf_text_config.model_type in \
1389
+ ('deepseek_v2', 'deepseek_v3', 'deepseek_mtp', 'kimi_k2'):
1390
+ return self.hf_text_config.kv_lora_rank is not None
1391
+ elif self.hf_text_config.model_type == 'eagle':
1392
+ # if the model is an EAGLE module, check for the
1393
+ # underlying architecture
1394
+ return self.hf_text_config.model.model_type in \
1395
+ ('deepseek_v2', 'deepseek_v3') \
1396
+ and self.hf_text_config.kv_lora_rank is not None
1397
+ return False
1398
+
1399
+ def get_head_size(self) -> int:
1400
+ # TODO remove hard code
1401
+ if self.is_deepseek_mla:
1402
+ qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
1403
+ 0)
1404
+ if self.use_mla:
1405
+ return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
1406
+ else:
1407
+ qk_nope_head_dim = getattr(self.hf_text_config,
1408
+ "qk_nope_head_dim", 0)
1409
+ if qk_rope_head_dim and qk_nope_head_dim:
1410
+ return qk_rope_head_dim + qk_nope_head_dim
1411
+
1412
+ if hasattr(self.hf_text_config,
1413
+ "model_type") and (self.hf_text_config.model_type
1414
+ == "zamba2"):
1415
+ return self.hf_text_config.attention_head_dim
1416
+
1417
+ if self.is_attention_free:
1418
+ return 0
1419
+
1420
+ # NOTE: Some configs may set head_dim=None in the config
1421
+ if getattr(self.hf_text_config, "head_dim", None) is not None:
1422
+ return self.hf_text_config.head_dim
1423
+
1424
+ # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
1425
+ if getattr(self.hf_text_config, "hidden_size_per_head",
1426
+ None) is not None:
1427
+ return self.hf_text_config.hidden_size_per_head
1428
+
1429
+ # FIXME(woosuk): This may not be true for all models.
1430
+ return (self.hf_text_config.hidden_size //
1431
+ self.hf_text_config.num_attention_heads)
1432
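# Worked example with illustrative values: a dense Llama-style config with
# hidden_size=4096 and num_attention_heads=32 falls through to the final
# branch and gets a head size of 4096 // 32 = 128, while a DeepSeek-style
# MLA config with kv_lora_rank=512 and qk_rope_head_dim=64 returns
# 512 + 64 = 576 when MLA is enabled.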
+
1433
+ def get_total_num_kv_heads(self) -> int:
1434
+ """Returns the total number of KV heads."""
1435
+ # For GPTBigCode & Falcon:
1436
+ # NOTE: for falcon, when new_decoder_architecture is True, the
1437
+ # multi_query flag is ignored and we use n_head_kv for the number of
1438
+ # KV heads.
1439
+ falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
1440
+ new_decoder_arch_falcon = (
1441
+ self.hf_config.model_type in falcon_model_types
1442
+ and getattr(self.hf_config, "new_decoder_architecture", False))
1443
+ if not new_decoder_arch_falcon and getattr(self.hf_text_config,
1444
+ "multi_query", False):
1445
+ # Multi-query attention, only one KV head.
1446
+ # Currently, tensor parallelism is not supported in this case.
1447
+ return 1
1448
+
1449
+ # For DBRX and MPT
1450
+ if self.hf_config.model_type == "mpt":
1451
+ if "kv_n_heads" in self.hf_config.attn_config:
1452
+ return self.hf_config.attn_config["kv_n_heads"]
1453
+ return self.hf_config.num_attention_heads
1454
+ if self.hf_config.model_type == "dbrx":
1455
+ return getattr(self.hf_config.attn_config, "kv_n_heads",
1456
+ self.hf_config.num_attention_heads)
1457
+
1458
+ if self.hf_config.model_type == "nemotron-nas":
1459
+ for block in self.hf_config.block_configs:
1460
+ if not block.attention.no_op:
1461
+ return self.hf_config.num_attention_heads \
1462
+ // block.attention.n_heads_in_group
1463
+
1464
+ raise RuntimeError("Couldn't determine number of kv heads")
1465
+
1466
+ if self.is_attention_free:
1467
+ return 0
1468
+
1469
+ attributes = [
1470
+ # For Falcon:
1471
+ "n_head_kv",
1472
+ "num_kv_heads",
1473
+ # For LLaMA-2:
1474
+ "num_key_value_heads",
1475
+ # For ChatGLM:
1476
+ "multi_query_group_num",
1477
+ ]
1478
+ for attr in attributes:
1479
+ num_kv_heads = getattr(self.hf_text_config, attr, None)
1480
+ if num_kv_heads is not None:
1481
+ return num_kv_heads
1482
+
1483
+ # For non-grouped-query attention models, the number of KV heads is
1484
+ # equal to the number of attention heads.
1485
+ return self.hf_text_config.num_attention_heads
1486
+
1487
+ def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
1488
+ """Returns the number of KV heads per GPU."""
1489
+ if self.use_mla:
1490
+ # When using MLA during decode it becomes MQA
1491
+ return 1
1492
+
1493
+ total_num_kv_heads = self.get_total_num_kv_heads()
1494
+ # If tensor parallelism is used, we divide the number of KV heads by
1495
+ # the tensor parallel size. We will replicate the KV heads in the
1496
+ # case where the number of KV heads is smaller than the tensor
1497
+ # parallel size so each GPU has at least one KV head.
1498
+ return max(1,
1499
+ total_num_kv_heads // parallel_config.tensor_parallel_size)
1500
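# Worked example with illustrative values: a GQA model with 8 total KV
# heads served at tensor_parallel_size=4 gets 8 // 4 = 2 KV heads per GPU,
# while tensor_parallel_size=32 replicates heads so that each GPU still
# holds max(1, 8 // 32) = 1 KV head.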
+
1501
+ def get_num_attention_heads(self,
1502
+ parallel_config: "ParallelConfig") -> int:
1503
+ num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
1504
+ return num_heads // parallel_config.tensor_parallel_size
1505
+
1506
+ def get_layers_start_end_indices(
1507
+ self, parallel_config: "ParallelConfig") -> tuple[int, int]:
1508
+ from vllm.distributed.utils import get_pp_indices
1509
+ if (self.hf_text_config.model_type == "deepseek_mtp"
1510
+ or self.hf_config.model_type == "mimo_mtp"
1511
+ or self.hf_config.model_type == "glm4_moe_mtp"
1512
+ or self.hf_config.model_type == "ernie_mtp"
1513
+ or self.hf_config.model_type == "qwen3_next_mtp"):
1514
+ total_num_hidden_layers = getattr(self.hf_text_config,
1515
+ "num_nextn_predict_layers", 0)
1516
+ else:
1517
+ total_num_hidden_layers = getattr(self.hf_text_config,
1518
+ "num_hidden_layers", 0)
1519
+ # the layout order is: DP x PP x TP
1520
+ pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
1521
+ ) % parallel_config.pipeline_parallel_size
1522
+ pp_size = parallel_config.pipeline_parallel_size
1523
+ start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
1524
+ return start, end
1525
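# Worked example with illustrative values: with 80 hidden layers,
# tensor_parallel_size=4, pipeline_parallel_size=2 and global rank 6,
# pp_rank = (6 // 4) % 2 = 1, so (assuming the default even partition in
# get_pp_indices) this rank owns layers [40, 80).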
+
1526
+ def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
1527
+ start, end = self.get_layers_start_end_indices(parallel_config)
1528
+ return end - start
1529
+
1530
+ def get_num_layers_by_block_type(
1531
+ self,
1532
+ parallel_config: "ParallelConfig",
1533
+ block_type: LayerBlockType = LayerBlockType.attention,
1534
+ ) -> int:
1535
+ # This function relies on 'layers_block_type' in hf_config,
1536
+ # for configs without this attribute, we need workarounds like the ones below
1537
+ attn_block_type = block_type == LayerBlockType.attention
1538
+ is_transformer = not self.is_hybrid and \
1539
+ not self.has_noops and \
1540
+ not self.is_attention_free
1541
+ start, end = self.get_layers_start_end_indices(parallel_config)
1542
+
1543
+ if is_transformer:
1544
+ # Handle the basic case first
1545
+ return end - start if attn_block_type else 0
1546
+ elif self.is_attention_free:
1547
+ # Attention free
1548
+ # Note that this code assumes there
1549
+ # is only one type of attention-free block type.
1550
+ return 0 if attn_block_type else end - start
1551
+ elif self.has_noops:
1552
+ block_configs = self.hf_config.block_configs
1553
+ return sum(not bc.attention.no_op
1554
+ for bc in block_configs[start:end])
1555
+ else:
1556
+ # Hybrid model Jamba
1557
+ layers_block_type_value = getattr(self.hf_text_config,
1558
+ "layers_block_type", None)
1559
+ if layers_block_type_value is not None:
1560
+ if hasattr(self.hf_text_config,
1561
+ "model_type") and (self.hf_text_config.model_type
1562
+ == "zamba2"):
1563
+ if attn_block_type:
1564
+ return sum(t == "hybrid"
1565
+ for t in layers_block_type_value[start:end])
1566
+ else:
1567
+ return self.get_num_layers(parallel_config)
1568
+ return sum(t == block_type.value
1569
+ for t in layers_block_type_value[start:end])
1570
+
1571
+ # Hybrid model Minimax
1572
+ attn_type_list = getattr(self.hf_config, "attn_type_list", None)
1573
+ if attn_type_list:
1574
+ return sum(t == 1 for t in attn_type_list[start:end])
1575
+
1576
+ # Hybrid model Qwen3Next
1577
+ layer_types_value = getattr(self.hf_config, "layer_types", None)
1578
+ if layer_types_value is not None:
1579
+ if getattr(block_type, "value", block_type) == "attention":
1580
+ return sum(t == "full_attention"
1581
+ for t in layer_types_value[start:end])
1582
+ elif getattr(block_type, "value",
1583
+ block_type) == "linear_attention":
1584
+ return sum(t == "linear_attention"
1585
+ for t in layer_types_value[start:end])
1586
+ else:
1587
+ return sum(t == getattr(block_type, "value", block_type)
1588
+ for t in layer_types_value[start:end])
1589
+
1590
+ if (layers_block_type_value is None and attn_type_list is None
1591
+ and layer_types_value is None):
1592
+ raise ValueError(
1593
+ "The model is an hybrid without a"
1594
+ "layers_block_type or an attn_type_list, or a layer_types "
1595
+ "in the hf_config, cannot determine the num of "
1596
+ f"{block_type.value} layers")
1597
+
1598
+ def get_mamba_chunk_size(self) -> Optional[int]:
1599
+ """
1600
+ Returns the mamba chunk size if it exists
1601
+ """
1602
+ # used by e.g. Bamba, FalconH1, Granite, PLaMo2
1603
+ chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
1604
+ if chunk_size is None:
1605
+ # used by e.g. Mamba2, NemotronH, Zamba
1606
+ chunk_size = getattr(self.hf_text_config, "chunk_size", None)
1607
+ return chunk_size
1608
+
1609
+ def get_multimodal_config(self) -> "MultiModalConfig":
1610
+ """
1611
+ Get the multimodal configuration of the model.
1612
+
1613
+ Raises:
1614
+ ValueError: If the model is not multimodal.
1615
+ """
1616
+ if self.multimodal_config is None:
1617
+ raise ValueError("The model is not multimodal.")
1618
+
1619
+ return self.multimodal_config
1620
+
1621
+ def try_get_generation_config(self) -> dict[str, Any]:
1622
+ """
1623
+ This method attempts to retrieve the non-default values of the
1624
+ generation config for this model.
1625
+
1626
+ The generation config can contain information about special tokens, as
1627
+ well as sampling parameters. Which is why this method exists separately
1628
+ to `get_diff_sampling_param`.
1629
+
1630
+ Returns:
1631
+ A dictionary containing the non-default generation config.
1632
+ """
1633
+ if self.generation_config in {"auto", "vllm"}:
1634
+ config = try_get_generation_config(
1635
+ self.hf_config_path or self.model,
1636
+ trust_remote_code=self.trust_remote_code,
1637
+ revision=self.revision,
1638
+ )
1639
+ else:
1640
+ config = try_get_generation_config(
1641
+ self.generation_config,
1642
+ trust_remote_code=self.trust_remote_code,
1643
+ )
1644
+
1645
+ if config is None:
1646
+ return {}
1647
+
1648
+ return config.to_diff_dict()
1649
+
1650
+ def get_diff_sampling_param(self) -> dict[str, Any]:
1651
+ """
1652
+ This method returns a dictionary containing the non-default sampling
1653
+ parameters with `override_generation_config` applied.
1654
+
1655
+ The default sampling parameters are:
1656
+
1657
+ - vLLM's neutral defaults if `self.generation_config="vllm"`
1658
+ - the model's defaults if `self.generation_config="auto"`
1659
+ - as defined in `generation_config.json` if
1660
+ `self.generation_config="path/to/generation_config/dir"`
1661
+
1662
+ Returns:
1663
+ A dictionary containing the non-default sampling parameters.
1664
+ """
1665
+ if self.generation_config == "vllm":
1666
+ config = {}
1667
+ else:
1668
+ config = self.try_get_generation_config()
1669
+
1670
+ # Overriding with given generation config
1671
+ config.update(self.override_generation_config)
1672
+
1673
+ available_params = [
1674
+ "repetition_penalty",
1675
+ "temperature",
1676
+ "top_k",
1677
+ "top_p",
1678
+ "min_p",
1679
+ "max_new_tokens",
1680
+ ]
1681
+ if any(p in config for p in available_params):
1682
+ diff_sampling_param = {
1683
+ p: config.get(p)
1684
+ for p in available_params if config.get(p) is not None
1685
+ }
1686
+ # Huggingface definition of max_new_tokens is equivalent
1687
+ # to vLLM's max_tokens
1688
+ if "max_new_tokens" in diff_sampling_param:
1689
+ diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
1690
+ "max_new_tokens")
1691
+ else:
1692
+ diff_sampling_param = {}
1693
+
1694
+ if diff_sampling_param:
1695
+ logger.warning_once(
1696
+ "Default sampling parameters have been overridden by the "
1697
+ "model's Hugging Face generation config recommended from the "
1698
+ "model creator. If this is not intended, please relaunch "
1699
+ "vLLM instance with `--generation-config vllm`.")
1700
+ return diff_sampling_param
1701
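# Illustrative sketch, assuming a generation_config.json that ships
# {"temperature": 0.6, "top_p": 0.95, "max_new_tokens": 512}: this method
# would return {"temperature": 0.6, "top_p": 0.95, "max_tokens": 512},
# since Hugging Face's max_new_tokens is renamed to vLLM's max_tokens.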
+
1702
+ @property
1703
+ def is_encoder_decoder(self) -> bool:
1704
+ """Extract the HF encoder/decoder model flag."""
1705
+ """
1706
+ For Mllama, VLLM overrides HF's is_encoder_decoder flag and sets it to
1707
+ True to enable cross-attention.
1708
+ """
1709
+ return is_encoder_decoder(self.hf_config)
1710
+
1711
+ @property
1712
+ def uses_mrope(self) -> bool:
1713
+ return uses_mrope(self.hf_config)
1714
+
1715
+ @property
1716
+ def is_multimodal_model(self) -> bool:
1717
+ return self.multimodal_config is not None
1718
+
1719
+ @property
1720
+ def is_multimodal_raw_input_only_model(self) -> bool:
1721
+ return self._model_info.supports_multimodal_raw_input_only
1722
+
1723
+ @property
1724
+ def is_cross_encoder(self) -> bool:
1725
+ return (self._model_info.supports_cross_encoding
1726
+ or self.convert_type == "classify")
1727
+
1728
+ @property
1729
+ def is_pp_supported(self) -> bool:
1730
+ return self._model_info.supports_pp
1731
+
1732
+ @property
1733
+ def is_attention_free(self) -> bool:
1734
+ return self._model_info.is_attention_free
1735
+
1736
+ @property
1737
+ def is_hybrid(self) -> bool:
1738
+ return self._model_info.is_hybrid
1739
+
1740
+ @property
1741
+ def has_noops(self) -> bool:
1742
+ return self._model_info.has_noops
1743
+
1744
+ @property
1745
+ def has_inner_state(self):
1746
+ return self._model_info.has_inner_state
1747
+
1748
+ @property
1749
+ def is_v1_compatible(self) -> bool:
1750
+ return not self._model_info.supports_v0_only
1751
+
1752
+ @property
1753
+ def use_mla(self) -> bool:
1754
+ return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
1755
+
1756
+ @property
1757
+ def is_matryoshka(self) -> bool:
1758
+ return (bool(getattr(self.hf_config, "matryoshka_dimensions", None))
1759
+ or getattr(self.hf_config, "is_matryoshka", False))
1760
+
1761
+ @property
1762
+ def matryoshka_dimensions(self):
1763
+ return getattr(self.hf_config, "matryoshka_dimensions", None)
1764
+
1765
+ @property
1766
+ def use_pad_token(self) -> bool:
1767
+ # cross_encoder models defaults to using pad_token.
1768
+ # `llm as reranker` models defaults to not using pad_token.
1769
+ return getattr(self.hf_config, "use_pad_token", True)
1770
+
1771
+ @property
1772
+ def head_dtype(self) -> torch.dtype:
1773
+ """
1774
+ "head" refers to the last Linear layer(s) of an LLM,
1775
+ such as the lm_head in a generation model,
1776
+ or the score or classifier in a classification model.
1777
+
1778
+ The default head_dtype is based on runner_type.\n
1779
+ - The pooling model defaults to using fp32 head,
1780
+ you can use --hf-overrides '{"head_dtype": "model"}' to disable it.\n
1781
+ - The generate model defaults to not using fp32 head,
1782
+ you can use --hf-overrides '{"head_dtype": "float32"}' to enable it.
1783
+ """
1784
+ head_dtype = _get_head_dtype(config=self.hf_config,
1785
+ dtype=self.dtype,
1786
+ runner_type=self.runner_type)
1787
+
1788
+ if head_dtype not in current_platform.supported_dtypes:
1789
+ logger.warning_once(
1790
+ "The current platform does not support [%s] head dtype, "
1791
+ "fallback to model dtype [%s].", head_dtype, self.dtype)
1792
+ return self.dtype
1793
+
1794
+ logger.debug_once("head dtype: %s", head_dtype)
1795
+ return head_dtype
1796
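# Usage sketch (hypothetical model name, flags as described in the
# docstring above):
#   vllm serve my-org/my-model --hf-overrides '{"head_dtype": "float32"}'
# forces an fp32 head for a generation model, while
#   vllm serve my-org/my-model --hf-overrides '{"head_dtype": "model"}'
# keeps a pooling model's head in the model dtype instead of fp32.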
+
1797
+ def get_and_verify_max_len(self, max_model_len: int):
1798
+ # Consider max_model_len in tokenizer_config only when
1799
+ # pooling models use absolute position_embedding.
1800
+ tokenizer_config = None
1801
+ if (self.runner_type == "pooling" and getattr(
1802
+ self.hf_config, "position_embedding_type", "") == "absolute"):
1803
+ tokenizer_config = try_get_tokenizer_config(
1804
+ self.tokenizer,
1805
+ trust_remote_code=self.trust_remote_code,
1806
+ revision=self.tokenizer_revision)
1807
+ max_model_len = _get_and_verify_max_len(
1808
+ hf_config=self.hf_text_config,
1809
+ tokenizer_config=tokenizer_config,
1810
+ max_model_len=max_model_len,
1811
+ disable_sliding_window=self.disable_sliding_window,
1812
+ sliding_window=self.get_sliding_window(),
1813
+ spec_target_max_model_len=self.spec_target_max_model_len,
1814
+ encoder_config=self.encoder_config)
1815
+ logger.info("Using max model len %s", max_model_len)
1816
+ return max_model_len
1817
+
1818
+
1819
+ Device = Literal["auto", "cuda", "cpu", "tpu", "xpu"]
1820
+
1821
+
1822
+ @config
1823
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
1824
+ class DeviceConfig:
1825
+ """Configuration for the device to use for vLLM execution."""
1826
+
1827
+ device: SkipValidation[Optional[Union[Device, torch.device]]] = "auto"
1828
+ """Device type for vLLM execution.
1829
+ This parameter is deprecated and will be
1830
+ removed in a future release.
1831
+ It will now be set automatically based
1832
+ on the current platform."""
1833
+ device_type: str = field(init=False)
1834
+ """Device type from the current platform. This is set in
1835
+ `__post_init__`."""
1836
+
1837
+ def compute_hash(self) -> str:
1838
+ """
1839
+ WARNING: Whenever a new field is added to this config,
1840
+ ensure that it is included in the factors list if
1841
+ it affects the computation graph.
1842
+
1843
+ Provide a hash that uniquely identifies all the configs
1844
+ that affect the structure of the computation
1845
+ graph from input ids/embeddings to the final hidden states,
1846
+ excluding anything before input ids/embeddings and after
1847
+ the final hidden states.
1848
+ """
1849
+ # no factors to consider.
1850
+ # the device/platform information will be summarized
1851
+ # by torch/vllm automatically.
1852
+ factors: list[Any] = []
1853
+ hash_str = hashlib.md5(str(factors).encode(),
1854
+ usedforsecurity=False).hexdigest()
1855
+ return hash_str
1856
+
1857
+ def __post_init__(self):
1858
+ if self.device == "auto":
1859
+ # Automated device type detection
1860
+ from vllm.platforms import current_platform
1861
+ self.device_type = current_platform.device_type
1862
+ if not self.device_type:
1863
+ raise RuntimeError(
1864
+ "Failed to infer device type, please set "
1865
+ "the environment variable `VLLM_LOGGING_LEVEL=DEBUG` "
1866
+ "to turn on verbose logging to help debug the issue.")
1867
+ else:
1868
+ # Device type is assigned explicitly
1869
+ if isinstance(self.device, str):
1870
+ self.device_type = self.device
1871
+ elif isinstance(self.device, torch.device):
1872
+ self.device_type = self.device.type
1873
+
1874
+ # Some device types require processing inputs on CPU
1875
+ if self.device_type in ["tpu"]:
1876
+ self.device = None
1877
+ else:
1878
+ # Set device with device type
1879
+ self.device = torch.device(self.device_type)
1880
+
1881
+
1882
+ SpeculativeMethod = Literal["ngram", "eagle", "eagle3", "medusa",
1883
+ "mlp_speculator", "draft_model", "deepseek_mtp",
1884
+ "ernie_mtp", "qwen3_next_mtp"]
1885
+
1886
+
1887
+ @config
1888
+ @dataclass
1889
+ class SpeculativeConfig:
1890
+ """Configuration for speculative decoding."""
1891
+
1892
+ # General speculative decoding control
1893
+ num_speculative_tokens: SkipValidation[int] = None # type: ignore
1894
+ """The number of speculative tokens, if provided. It will default to the
1895
+ number in the draft model config if present, otherwise, it is required."""
1896
+ model: Optional[str] = None
1897
+ """The name of the draft model, eagle head, or additional weights, if
1898
+ provided."""
1899
+ method: Optional[SpeculativeMethod] = None
1900
+ """The name of the speculative method to use. If users provide and set the
1901
+ `model` param, the speculative method type will be detected automatically
1902
+ if possible; if the `model` param is not provided, the method name must be
1903
+ provided.
1904
+
1905
+ If using `ngram` method, the related configuration `prompt_lookup_max` and
1906
+ `prompt_lookup_min` should be considered."""
1907
+ draft_tensor_parallel_size: Optional[int] = None
1908
+ """The degree of the tensor parallelism for the draft model. Can only be 1
1909
+ or the same as the target model's tensor parallel size."""
1910
+ disable_logprobs: bool = True
1911
+ """If set to True, token log probabilities are not returned during
1912
+ speculative decoding. If set to False, token log probabilities are returned
1913
+ according to the log probability settings in SamplingParams."""
1914
+
1915
+ # Draft model configuration
1916
+ quantization: Optional[me_quant.QuantizationMethods] = None
1917
+ """Quantization method that was used to quantize the draft model weights.
1918
+ If `None`, we assume the model weights are not quantized. Note that it only
1919
+ takes effect when using the draft model-based speculative method."""
1920
+ max_model_len: Optional[int] = None
1921
+ """The maximum model length of the draft model. Used when testing the
1922
+ ability to skip speculation for some sequences."""
1923
+ revision: Optional[str] = None
1924
+ """The specific model version to use for the draft model. It can be a
1925
+ branch name, a tag name, or a commit id. If unspecified, will use the
1926
+ default version."""
1927
+ code_revision: Optional[str] = None
1928
+ """The specific revision to use for the draft model code on Hugging Face
1929
+ Hub. It can be a branch name, a tag name, or a commit id. If unspecified,
1930
+ will use the default version."""
1931
+
1932
+ # Advanced control
1933
+ disable_by_batch_size: Optional[int] = None
1934
+ """Disable speculative decoding for new incoming requests when the number
1935
+ of enqueued requests is larger than this value, if provided."""
1936
+
1937
+ # Ngram proposer configuration
1938
+ prompt_lookup_max: Optional[int] = None
1939
+ """Maximum size of ngram token window when using Ngram proposer, required
1940
+ when method is set to ngram."""
1941
+ prompt_lookup_min: Optional[int] = None
1942
+ """Minimum size of ngram token window when using Ngram proposer, if
1943
+ provided. Defaults to 1."""
1944
+
1945
+ speculative_token_tree: Optional[str] = None
1946
+ """Specifies the tree structure for speculative token generation.
1947
+ """
1948
+ # required configuration params passed from engine
1949
+ target_model_config: SkipValidation[ModelConfig] = None # type: ignore
1950
+ """The configuration of the target model."""
1951
+ target_parallel_config: SkipValidation[
1952
+ ParallelConfig] = None # type: ignore
1953
+ """The parallel configuration for the target model."""
1954
+ enable_chunked_prefill: SkipValidation[bool] = None # type: ignore
1955
+ """Whether vLLM is configured to use chunked prefill or not. Used for
1956
+ raising an error since it's not yet compatible with speculative decode."""
1957
+ disable_log_stats: SkipValidation[bool] = None # type: ignore
1958
+ """Whether to disable the periodic printing of stage times in speculative
1959
+ decoding."""
1960
+
1961
+ # params generated in the post-init stage
1962
+ draft_model_config: SkipValidation[ModelConfig] = None # type: ignore
1963
+ """The configuration of the draft model initialized internal."""
1964
+ draft_parallel_config: SkipValidation[
1965
+ ParallelConfig] = None # type: ignore
1966
+ """The parallel configuration for the draft model initialized internal."""
1967
+
1968
+ def compute_hash(self) -> str:
1969
+ """
1970
+ WARNING: Whenever a new field is added to this config,
1971
+ ensure that it is included in the factors list if
1972
+ it affects the computation graph.
1973
+
1974
+ Provide a hash that uniquely identifies all the configs
1975
+ that affect the structure of the computation
1976
+ graph from input ids/embeddings to the final hidden states,
1977
+ excluding anything before input ids/embeddings and after
1978
+ the final hidden states.
1979
+ """
1980
+ factors: list[Any] = []
1981
+ # Eagle3 affects the computation graph because it returns intermediate
1982
+ # hidden states in addition to the final hidden state.
1983
+ factors.append(self.method == "eagle3")
1984
+ hash_str = hashlib.md5(str(factors).encode(),
1985
+ usedforsecurity=False).hexdigest()
1986
+ return hash_str
1987
+
1988
+ @staticmethod
1989
+ def hf_config_override(hf_config: PretrainedConfig) -> PretrainedConfig:
1990
+ if hf_config.model_type == "deepseek_v3":
1991
+ hf_config.model_type = "deepseek_mtp"
1992
+ if hf_config.model_type == "deepseek_mtp":
1993
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
1994
+ hf_config.update({
1995
+ "n_predict": n_predict,
1996
+ "architectures": ["DeepSeekMTPModel"]
1997
+ })
1998
+
1999
+ if hf_config.architectures[0] == "MiMoForCausalLM":
2000
+ hf_config.model_type = "mimo_mtp"
2001
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2002
+ hf_config.update({
2003
+ "num_hidden_layers": 0,
2004
+ "n_predict": n_predict,
2005
+ "architectures": ["MiMoMTPModel"]
2006
+ })
2007
+
2008
+ if hf_config.architectures[0] == "Glm4MoeForCausalLM":
2009
+ hf_config.model_type = "glm4_moe_mtp"
2010
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2011
+ hf_config.update({
2012
+ "num_hidden_layers": 0,
2013
+ "n_predict": n_predict,
2014
+ "architectures": ["Glm4MoeMTPModel"]
2015
+ })
2016
+
2017
+ if hf_config.model_type == "ernie4_5_moe":
2018
+ hf_config.model_type = "ernie_mtp"
2019
+ if hf_config.model_type == "ernie_mtp":
2020
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2021
+ hf_config.update({
2022
+ "n_predict": n_predict,
2023
+ "architectures": ["ErnieMTPModel"]
2024
+ })
2025
+
2026
+ if hf_config.model_type == "qwen3_next":
2027
+ hf_config.model_type = "qwen3_next_mtp"
2028
+ if hf_config.model_type == "qwen3_next_mtp":
2029
+ n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
2030
+ hf_config.update({
2031
+ "n_predict": n_predict,
2032
+ "architectures": ["Qwen3NextMTP"]
2033
+ })
2034
+
2035
+ return hf_config
2036
+
2037
+ def __post_init__(self):
2038
+
2039
+ # Note: "method" is a new parameter that helps to extend the
2040
+ # configuration of non-model-based proposers, and the "model" parameter
2041
+ # will be used to set the draft model, eagle head, or additional weight
2042
+ # when needed. If users do not specify "method", the speculative method
2043
+ # will be detected automatically if possible. If the speculative method
2044
+ # can not be detected, it will be considered as the "draft_model" by
2045
+ # default.
2046
+
2047
+ if self.model is None and self.num_speculative_tokens is not None:
2048
+ # TODO(Shangming): Refactor mtp configuration logic when supporting
2049
+ # mtp acceleration for more models besides deepseek_v3
2050
+ if self.target_model_config and \
2051
+ (self.target_model_config.hf_text_config.model_type \
2052
+ == "deepseek_v3" or
2053
+ self.target_model_config.hf_text_config.model_type in
2054
+ ("mimo","ernie4_5_moe", "qwen3_next")):
2055
+ # use the draft model from the same model:
2056
+ self.model = self.target_model_config.model
2057
+ # Align the quantization of draft model for cases such as
2058
+ # --quantization fp8 with a bf16 checkpoint.
2059
+ if not self.quantization:
2060
+ self.quantization = self.target_model_config.quantization
2061
+ elif self.method in ("ngram", "[ngram]"):
2062
+ self.model = "ngram"
2063
+ else:
2064
+ raise ValueError("num_speculative_tokens was provided without "
2065
+ "speculative model.")
2066
+
2067
+ # Automatically configure the method for ngram when "model" is used
2068
+ # instead of "method"
2069
+ if self.method is None and (self.model is not None
2070
+ and self.model in ("ngram", "[ngram]")):
2071
+ self.method = "ngram"
2072
+
2073
+ if self.method in ("ngram", "[ngram]"):
2074
+ # Unified to "ngram" internally
2075
+ self.method = "ngram"
2076
+ # Set default values if not provided
2077
+ if (self.prompt_lookup_min is None
2078
+ and self.prompt_lookup_max is None):
2079
+ # TODO(woosuk): Tune these values. They are arbitrarily chosen.
2080
+ self.prompt_lookup_min = 5
2081
+ self.prompt_lookup_max = 5
2082
+ elif self.prompt_lookup_min is None:
2083
+ assert self.prompt_lookup_max is not None
2084
+ self.prompt_lookup_min = self.prompt_lookup_max
2085
+ elif self.prompt_lookup_max is None:
2086
+ assert self.prompt_lookup_min is not None
2087
+ self.prompt_lookup_max = self.prompt_lookup_min
2088
+
2089
+ # Validate values
2090
+ if self.prompt_lookup_min < 1:
2091
+ raise ValueError(
2092
+ f"prompt_lookup_min={self.prompt_lookup_min} must be > 0")
2093
+ if self.prompt_lookup_max < 1:
2094
+ raise ValueError(
2095
+ f"prompt_lookup_max={self.prompt_lookup_max} must be > 0")
2096
+ if self.prompt_lookup_min > self.prompt_lookup_max:
2097
+ raise ValueError(
2098
+ f"prompt_lookup_min={self.prompt_lookup_min} must "
2099
+ f"be <= prompt_lookup_max={self.prompt_lookup_max}")
2100
+
2101
+ # TODO: currently we still need to extract vocab_size from the target model
2102
+ # config; in the future, we may refactor it out and set the
2103
+ # draft related config as None here.
2104
+ self.draft_model_config = self.target_model_config
2105
+ self.draft_parallel_config = self.target_parallel_config
2106
+ else:
2107
+ self.prompt_lookup_max = 0
2108
+ self.prompt_lookup_min = 0
2109
+
2110
+ if self.model is not None:
2111
+ self.draft_model_config = ModelConfig(
2112
+ model=self.model,
2113
+ runner="draft",
2114
+ tokenizer=self.target_model_config.tokenizer,
2115
+ tokenizer_mode=self.target_model_config.tokenizer_mode,
2116
+ trust_remote_code=self.target_model_config.
2117
+ trust_remote_code,
2118
+ allowed_local_media_path=self.target_model_config.
2119
+ allowed_local_media_path,
2120
+ dtype=self.target_model_config.dtype,
2121
+ seed=self.target_model_config.seed,
2122
+ revision=self.revision,
2123
+ code_revision=self.code_revision,
2124
+ tokenizer_revision=self.target_model_config.
2125
+ tokenizer_revision,
2126
+ spec_target_max_model_len=self.target_model_config.
2127
+ max_model_len,
2128
+ quantization=self.quantization,
2129
+ enforce_eager=self.target_model_config.enforce_eager,
2130
+ max_seq_len_to_capture=self.target_model_config.
2131
+ max_seq_len_to_capture,
2132
+ max_logprobs=self.target_model_config.max_logprobs,
2133
+ hf_overrides=SpeculativeConfig.hf_config_override,
2134
+ )
2135
+
2136
+ # Automatically detect the method
2137
+ if self.method in ('eagle', 'eagle3'):
2138
+ pass
2139
+ # examples:
2140
+ # yuhuili/EAGLE-LLaMA3-Instruct-8B
2141
+ # yuhuili/EAGLE3-LLaMA3.1-Instruct-8B
2142
+ # AngelSlim/Qwen3-8B_eagle3
2143
+ elif "eagle-" in self.draft_model_config.model.lower():
2144
+ self.method = "eagle"
2145
+ elif "eagle3" in self.draft_model_config.model.lower():
2146
+ self.method = "eagle3"
2147
+ elif self.draft_model_config.hf_config.model_type == "medusa":
2148
+ self.method = "medusa"
2149
+ elif (self.draft_model_config.hf_config.model_type ==
2150
+ "mlp_speculator"):
2151
+ self.method = "mlp_speculator"
2152
+ elif (self.draft_model_config.hf_config.model_type
2153
+ in ("deepseek_mtp", "mimo_mtp", "glm4_moe_mtp")):
2154
+ self.method = "deepseek_mtp"
2155
+ if self.num_speculative_tokens > 1:
2156
+ logger.warning(
2157
+ "All Deepseek MTP models only have " \
2158
+ "one layer. Might need some code changes " \
2159
+ "to support multiple layers."
2160
+ )
2161
+ elif (self.draft_model_config.hf_config.model_type ==
2162
+ "ernie_mtp"):
2163
+ self.method = "ernie_mtp"
2164
+ if self.num_speculative_tokens > 1:
2165
+ logger.warning(
2166
+ "All Ernie MTP models only have " \
2167
+ "one layer. Might need some code changes " \
2168
+ "to support multiple layers."
2169
+ )
2170
+ elif (self.draft_model_config.hf_config.model_type ==
2171
+ "qwen3_next_mtp"):
2172
+ self.method = "qwen3_next_mtp"
2173
+ if self.num_speculative_tokens > 1:
2174
+ logger.warning(
2175
+ "All Qwen3Next MTP models only have " \
2176
+ "one layer. Might need some code changes " \
2177
+ "to support multiple layers."
2178
+ )
2179
+ else:
2180
+ self.method = "draft_model"
2181
+ raise NotImplementedError(
2182
+ "Speculative decoding with draft model is not "
2183
+ "supported yet. Please consider using other "
2184
+ "speculative decoding methods such as ngram, medusa, "
2185
+ "eagle, or deepseek_mtp.")
2186
+
2187
+ # Replace hf_config for EAGLE draft_model
2188
+ if self.method in ("eagle", "eagle3"):
2189
+ if self.enable_chunked_prefill and not envs.VLLM_USE_V1:
2190
+ raise ValueError(
2191
+ "Chunked prefill and EAGLE are not compatible "
2192
+ "when using V0.")
2193
+
2194
+ from vllm.transformers_utils.configs import (
2195
+ SpeculatorsConfig)
2196
+ from vllm.transformers_utils.configs.eagle import (
2197
+ EAGLEConfig)
2198
+
2199
+ if isinstance(self.draft_model_config.hf_config,
2200
+ (EAGLEConfig, SpeculatorsConfig)):
2201
+ pass
2202
+ else:
2203
+ eagle_config = EAGLEConfig(
2204
+ self.draft_model_config.hf_config,
2205
+ method=self.method,
2206
+ model_type="eagle")
2207
+ self.draft_model_config.hf_config = eagle_config
2208
+
2209
+ if (self.num_speculative_tokens is not None
2210
+ and hasattr(self.draft_model_config.hf_config,
2211
+ "num_lookahead_tokens")):
2212
+ self.draft_model_config.hf_config.num_lookahead_tokens = \
2213
+ self.num_speculative_tokens
2214
+
2215
+ n_predict = getattr(self.draft_model_config.hf_config,
2216
+ "n_predict", None)
2217
+ if n_predict is not None:
2218
+ if self.num_speculative_tokens is None:
2219
+ # Default to max value defined in draft model config.
2220
+ self.num_speculative_tokens = n_predict
2221
+ elif self.num_speculative_tokens > n_predict and \
2222
+ self.num_speculative_tokens % n_predict != 0:
2223
+ # Ensure divisibility for MTP module reuse.
2224
+ raise ValueError(
2225
+ f"num_speculative_tokens:{self.num_speculative_tokens}"
2226
+ f" must be divisible by {n_predict=}")
2227
+
2228
+ if self.speculative_token_tree is None:
2229
+ # Generate chain of tokens.
2230
+ self.speculative_token_tree = str([
2231
+ (i + 1) * (0, )
2232
+ for i in range(self.num_speculative_tokens)
2233
+ ])
2234
+ else:
2235
+ # Sort the token tree breadth-first.
2236
+ tree_choices = ast.literal_eval(
2237
+ self.speculative_token_tree)
2238
+ self.speculative_token_tree = str(
2239
+ sorted(tree_choices, key=lambda t: (len(t), t)))
2240
+
2241
+ self.draft_tensor_parallel_size = \
2242
+ SpeculativeConfig._verify_and_get_draft_tp(
2243
+ self.target_parallel_config,
2244
+ self.draft_tensor_parallel_size,
2245
+ self.draft_model_config.hf_config
2246
+ )
2247
+
2248
+ self.draft_model_config.max_model_len = (
2249
+ SpeculativeConfig._maybe_override_draft_max_model_len(
2250
+ self.max_model_len,
2251
+ self.draft_model_config.max_model_len,
2252
+ self.target_model_config.max_model_len,
2253
+ ))
2254
+
2255
+ self.draft_parallel_config = (
2256
+ SpeculativeConfig.create_draft_parallel_config(
2257
+ self.target_parallel_config,
2258
+ self.draft_tensor_parallel_size))
2259
+
2260
+ @staticmethod
2261
+ def _maybe_override_draft_max_model_len(
2262
+ speculative_max_model_len: Optional[int],
2263
+ draft_max_model_len: int,
2264
+ target_max_model_len: int,
2265
+ ) -> int:
2266
+ """Determine the max sequence len for the draft model. This is usually
2267
+ the draft_max_model_len, but may be the target_max_model_len if it is
2268
+ less than the draft_max_model_len, or may be speculative_max_model_len
2269
+ if it is specified.
2270
+
2271
+ This is necessary so that sequences do not exceed the capacity of the
2272
+ draft model or the target model.
2273
+
2274
+ speculative_max_model_len is mainly used for testing that sequences can
2275
+ skip speculation.
2276
+ """
2277
+
2278
+ if speculative_max_model_len is not None:
2279
+
2280
+ if speculative_max_model_len > draft_max_model_len:
2281
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2282
+ f"larger than {draft_max_model_len=}")
2283
+
2284
+ if speculative_max_model_len > target_max_model_len:
2285
+ raise ValueError(f"{speculative_max_model_len=} cannot be "
2286
+ f"larger than {target_max_model_len=}")
2287
+
2288
+ return speculative_max_model_len
2289
+
2290
+ return min(
2291
+ draft_max_model_len,
2292
+ target_max_model_len,
2293
+ )
2294
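# Worked example with illustrative values: for draft_max_model_len=4096,
# target_max_model_len=8192 and no speculative_max_model_len override, the
# draft model is capped at min(4096, 8192) = 4096; explicitly passing
# speculative_max_model_len=1024 lowers it to 1024 so that longer
# sequences skip speculation during testing.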
+
2295
+ @staticmethod
2296
+ def _verify_and_get_draft_tp(
2297
+ target_parallel_config: ParallelConfig,
2298
+ speculative_draft_tensor_parallel_size: Optional[int],
2299
+ draft_hf_config: PretrainedConfig) -> int:
2300
+ """
2301
+ Verifies and adjusts the tensor parallel size for a draft model
2302
+ specified using speculative_draft_tensor_parallel_size.
2303
+ """
2304
+ # If speculative_draft_tensor_parallel_size is unset then set it
2305
+ # appropriately else verify that it is set correctly.
2306
+ if speculative_draft_tensor_parallel_size is None:
2307
+ if draft_hf_config.model_type == "mlp_speculator":
2308
+ speculative_draft_tensor_parallel_size = 1
2309
+ if target_parallel_config.tensor_parallel_size > 1:
2310
+ logger.warning(
2311
+ "%s cannot currently be run with tp>1; "
2312
+ "setting speculative_draft_tensor_parallel_size=1",
2313
+ draft_hf_config.model_type)
2314
+ else:
2315
+ speculative_draft_tensor_parallel_size = \
2316
+ target_parallel_config.tensor_parallel_size
2317
+ elif speculative_draft_tensor_parallel_size not in (
2318
+ 1, target_parallel_config.tensor_parallel_size):
2319
+ raise ValueError(
2320
+ f"{speculative_draft_tensor_parallel_size=} cannot be "
2321
+ f"other value than 1 or target model tensor_parallel_size")
2322
+ return speculative_draft_tensor_parallel_size
2323
+
2324
+ @staticmethod
2325
+ def create_draft_parallel_config(
2326
+ target_parallel_config: ParallelConfig,
2327
+ speculative_draft_tensor_parallel_size: int,
2328
+ ) -> ParallelConfig:
2329
+ """Create a parallel config for use by the draft worker.
2330
+
2331
+ This is mostly a copy of the target parallel config, except the tp_size.
2332
+ """
2333
+ draft_parallel_config = ParallelConfig(
2334
+ pipeline_parallel_size=target_parallel_config.
2335
+ pipeline_parallel_size,
2336
+ tensor_parallel_size=speculative_draft_tensor_parallel_size,
2337
+ distributed_executor_backend=target_parallel_config.
2338
+ distributed_executor_backend,
2339
+ max_parallel_loading_workers=target_parallel_config.
2340
+ max_parallel_loading_workers,
2341
+ disable_custom_all_reduce=target_parallel_config.
2342
+ disable_custom_all_reduce,
2343
+ ray_workers_use_nsight=target_parallel_config.
2344
+ ray_workers_use_nsight,
2345
+ placement_group=target_parallel_config.placement_group,
2346
+ )
2347
+
2348
+ return draft_parallel_config
2349
+
2350
+ @model_validator(mode='after')
2351
+ def _verify_args(self) -> Self:
2352
+ if self.num_speculative_tokens is None:
2353
+ raise ValueError(
2354
+ "num_speculative_tokens must be provided with "
2355
+ "speculative model unless the draft model config contains an "
2356
+ "n_predict parameter.")
2357
+
2358
+ if self.num_speculative_tokens <= 0:
2359
+ raise ValueError("Expected num_speculative_tokens to be greater "
2360
+ f"than zero ({self.num_speculative_tokens}).")
2361
+
2362
+ if self.draft_model_config:
2363
+ self.draft_model_config.verify_with_parallel_config(
2364
+ self.draft_parallel_config)
2365
+
2366
+ if (self.disable_by_batch_size is not None
2367
+ and self.disable_by_batch_size < 2):
2368
+ raise ValueError("Expect the batch size threshold of disabling "
2369
+ "speculative decoding is > 1, but got "
2370
+ f"{self.disable_by_batch_size=}")
2371
+
2372
+ eagle3_target_supported = ["llama", "qwen"]
2373
+ if self.method == "eagle3" and self.target_model_config and not any(
2374
+ supported_model in
2375
+ self.target_model_config.hf_text_config.model_type
2376
+ for supported_model in eagle3_target_supported):
2377
+ raise ValueError(
2378
+ f"Eagle3 is only supported for {eagle3_target_supported} models. " # noqa: E501
2379
+ f"Got {self.target_model_config.hf_text_config.model_type=}")
2380
+
2381
+ return self
2382
+
2383
+ @property
2384
+ def num_lookahead_slots(self) -> int:
2385
+ """The number of additional slots the scheduler should allocate per
2386
+ step, in addition to the slots allocated for each known token.
2387
+
2388
+ This is equal to the number of speculative tokens, as each speculative
2389
+ token must be scored.
2390
+ """
2391
+ return self.num_speculative_tokens
2392
+
2393
+ def use_eagle(self) -> bool:
2394
+ return self.method in ("eagle", "eagle3", "deepseek_mtp", "ernie_mtp",
2395
+ "qwen3_next_mtp")
2396
+
2397
+ def __repr__(self) -> str:
2398
+ method = self.method
2399
+ model = None if method == "ngram" else self.draft_model_config.model
2400
+ num_spec_tokens = self.num_speculative_tokens
2401
+ return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"
2402
+
2403
+
2404
+ @config
2405
+ @dataclass
2406
+ class MultiModalConfig:
2407
+ """Controls the behavior of multimodal models."""
2408
+
2409
+ limit_per_prompt: dict[str, int] = \
2410
+ cast(dict[str, int], get_field(ModelConfig, "limit_mm_per_prompt"))
2411
+ """
2412
+ The maximum number of input items allowed per prompt for each modality.
2413
+ Defaults to 1 (V0) or 999 (V1) for each modality.
2414
+
2415
+ For example, to allow up to 16 images and 2 videos per prompt:
2416
+ `{"image": 16, "video": 2}`
2417
+ """
2418
+
2419
+ media_io_kwargs: dict[str, dict[str, Any]] = field(default_factory=dict)
2420
+ """Additional args passed to process media inputs, keyed by modalities.
2421
+ For example, to set num_frames for video, set
2422
+ `--media-io-kwargs '{"video": {"num_frames": 40} }'` """
2423
+
2424
+ mm_processor_kwargs: Optional[dict[str, object]] = None
2425
+ """
2426
+ Overrides for the multi-modal processor obtained from
2427
+ `transformers.AutoProcessor.from_pretrained`.
2428
+
2429
+ The available overrides depend on the model that is being run.
2430
+
2431
+ For example, for Phi-3-Vision:
2432
+ `{"num_crops": 4}`.
2433
+ """
2434
+
2435
+ mm_processor_cache_gb: float = 4
2436
+ """
2437
+ The size (in GiB) of the multi-modal processor cache, which is used to
2438
+
2439
+ This cache is duplicated for each API process and engine core process,
2440
+ resulting in a total memory usage of
2441
+ `mm_processor_cache_gb * (api_server_count + data_parallel_size)`.
2442
+
2443
+ Set to `0` to disable this cache completely (not recommended).
2444
+ """
2445
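# Worked example with illustrative values: with the default 4 GiB cache,
# api_server_count=2 and data_parallel_size=4, the total cache memory is
# 4 * (2 + 4) = 24 GiB across all processes.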
+
2446
+ mm_encoder_tp_mode: MMEncoderTPMode = "weights"
2447
+ """
2448
+ Indicates how to optimize multi-modal encoder inference using
2449
+ tensor parallelism (TP).
2450
+
2451
+ - `"weights"`: Within the same vLLM engine, split the weights of
2452
+ each layer across TP ranks. (default TP behavior)
2453
+ - `"data"`: Within the same vLLM engine, split the batched input data
2454
+ across TP ranks to process the data in parallel, while hosting
2455
+ the full weights on each TP rank.
2456
+ This batch-level DP is not to be confused with API request-level
2457
+ DP (which is controlled by `--data-parallel-size`).
2458
+ This is only supported on a per-model basis and falls back to
2459
+ `"weights"` if the encoder does not support DP.
2460
+ """
2461
+
2462
+ interleave_mm_strings: bool = False
2463
+ """
2464
+ Enable fully interleaved support for multimodal prompts.
2465
+ """
2466
+
2467
+ skip_mm_profiling: bool = False
2468
+ """
2469
+ When enabled, skips multimodal memory profiling and only profiles with
2470
+ language backbone model during engine initialization.
2471
+
2472
+ This reduces engine startup time but makes the user responsible for
2473
+ estimating the peak memory usage of the multimodal encoder activations and the
2474
+ embedding cache.
2475
+ """
2476
+
2477
+ def compute_hash(self) -> str:
2478
+ """
2479
+ WARNING: Whenever a new field is added to this config,
2480
+ ensure that it is included in the factors list if
2481
+ it affects the computation graph.
2482
+
2483
+ Provide a hash that uniquely identifies all the configs
2484
+ that affect the structure of the computation
2485
+ graph from input ids/embeddings to the final hidden states,
2486
+ excluding anything before input ids/embeddings and after
2487
+ the final hidden states.
2488
+ """
2489
+ # no factors to consider.
2490
+ # this config will not affect the computation graph.
2491
+ factors: list[Any] = []
2492
+ hash_str = hashlib.md5(str(factors).encode(),
2493
+ usedforsecurity=False).hexdigest()
2494
+ return hash_str
2495
+
2496
+ def get_limit_per_prompt(self, modality: str) -> int:
2497
+ """
2498
+ Get the maximum number of input items allowed per prompt
2499
+ for the given modality.
2500
+ """
2501
+ return self.limit_per_prompt.get(
2502
+ modality,
2503
+ 999 if envs.VLLM_USE_V1 else 1,
2504
+ )
2505
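# Usage sketch with hypothetical limits: given
# limit_per_prompt={"image": 16, "video": 2}, get_limit_per_prompt("image")
# returns 16, while an unlisted modality such as "audio" falls back to the
# default of 999 on V1 (or 1 on V0).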
+
2506
+ def merge_mm_processor_kwargs(
2507
+ self,
2508
+ inference_kwargs: Mapping[str, object],
2509
+ ) -> dict[str, object]:
2510
+ """
2511
+ Get the keyword arguments to pass to the multi-modal processor
2512
+ according to the extra arguments passed during inference.
2513
+ """
2514
+ kwargs = self.mm_processor_kwargs or {}
2515
+ return kwargs | dict(inference_kwargs)
2516
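# Usage sketch with hypothetical kwargs: given
# mm_processor_kwargs={"num_crops": 4}, calling
# merge_mm_processor_kwargs({"num_crops": 8}) returns {"num_crops": 8};
# per-request inference kwargs win because they form the right-hand side
# of the dict union above.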
+
2517
+
2518
+ @config
2519
+ @dataclass
2520
+ class PoolerConfig:
2521
+ """Controls the behavior of output pooling in pooling models."""
2522
+
2523
+ pooling_type: Optional[str] = None
2524
+ """
2525
+ The pooling method of the pooling model. This should be a key in
2526
+ [`vllm.model_executor.layers.pooler.PoolingType`][].
2527
+ """
2528
+
2529
+ ## for embeddings models
2530
+ normalize: Optional[bool] = None
2531
+ """
2532
+ Whether to normalize the embeddings outputs. Defaults to True.
2533
+ """
2534
+ dimensions: Optional[int] = None
2535
+ """
2536
+ Reduce the dimensions of embeddings if the model
2537
+ supports matryoshka representation. Defaults to None.
2538
+ """
2539
+ enable_chunked_processing: Optional[bool] = None
2540
+ """
2541
+ Whether to enable chunked processing for long inputs that exceed the model's
2542
+ maximum position embeddings. When enabled, long inputs will be split into
2543
+ chunks, processed separately, and then aggregated using weighted averaging.
2544
+ This allows embedding models to handle arbitrarily long text without CUDA
2545
+ errors. Defaults to False.
2546
+ """
2547
+ max_embed_len: Optional[int] = None
2548
+ """
2549
+ Maximum input length allowed for embedding generation. When set, allows
2550
+ inputs longer than max_embed_len to be accepted for embedding models.
2551
+ When an input exceeds max_embed_len, it will be handled according to
2552
+ the original max_model_len validation logic.
2553
+ Defaults to None (i.e. set to max_model_len).
2554
+ """
2555
+
2556
+ ## for classification models
2557
+ activation: Optional[bool] = None
2558
+ """
2559
+ Whether to apply activation function to the classification outputs.
2560
+ Defaults to True.
2561
+ """
2562
+ logit_bias: Optional[float] = None
2563
+ """
2564
+ If provided, apply classification logit biases. Defaults to None.
2565
+ """
2566
+
2567
+ ## for reward models
2568
+ softmax: Optional[bool] = None
2569
+ """
2570
+ Whether to apply softmax to the reward outputs.
2571
+ Defaults to True.
2572
+ """
2573
+ step_tag_id: Optional[int] = None
2574
+ """
2575
+ If set, only the score corresponding to the ``step_tag_id`` in the
2576
+ generated sentence should be returned. Otherwise, the scores for all tokens
2577
+ are returned.
2578
+ """
2579
+ returned_token_ids: Optional[list[int]] = None
2580
+ """
2581
+ A list of indices for the vocabulary dimensions to be extracted,
2582
+ such as the token IDs of ``good_token`` and ``bad_token`` in the
2583
+ ``math-shepherd-mistral-7b-prm`` model.
2584
+ """
2585
+
2586
+ def compute_hash(self) -> str:
2587
+ """
2588
+ WARNING: Whenever a new field is added to this config,
2589
+ ensure that it is included in the factors list if
2590
+ it affects the computation graph.
2591
+
2592
+ Provide a hash that uniquely identifies all the configs
2593
+ that affect the structure of the computation
2594
+ graph from input ids/embeddings to the final hidden states,
2595
+ excluding anything before input ids/embeddings and after
2596
+ the final hidden states.
2597
+ """
2598
+ # no factors to consider.
2599
+ # this config will not affect the computation graph.
2600
+ factors: list[Any] = []
2601
+ hash_str = hashlib.md5(str(factors).encode(),
2602
+ usedforsecurity=False).hexdigest()
2603
+ return hash_str
2604
+
2605
+
2606
+ _STR_DTYPE_TO_TORCH_DTYPE = {
2607
+ "half": torch.float16,
2608
+ "float16": torch.float16,
2609
+ "float": torch.float32,
2610
+ "float32": torch.float32,
2611
+ "bfloat16": torch.bfloat16,
2612
+ }
2613
+
2614
+ # model_type -> reason
2615
+ _FLOAT16_NOT_SUPPORTED_MODELS = {
2616
+ "gemma2": "Numerical instability. Please use bfloat16 or float32 instead.",
2617
+ "gemma3": "Numerical instability. Please use bfloat16 or float32 instead.",
2618
+ "gemma3_text":
2619
+ "Numerical instability. Please use bfloat16 or float32 instead.",
2620
+ "plamo2": "Numerical instability. Please use bfloat16 or float32 instead.",
2621
+ "glm4": "Numerical instability. Please use bfloat16 or float32 instead.",
2622
+ }
2623
+
2624
+
2625
+ def _is_valid_dtype(model_type: str, dtype: torch.dtype):
2626
+ if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16: # noqa: E501, SIM103
2627
+ return False
2628
+
2629
+ return True
2630
+
2631
+
2632
+ def _check_valid_dtype(model_type: str, dtype: torch.dtype):
2633
+ if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:
2634
+ reason = _FLOAT16_NOT_SUPPORTED_MODELS[model_type]
2635
+ raise ValueError(f"The model type {model_type!r} "
2636
+ f"does not support float16. Reason: {reason}")
2637
+
2638
+ return True
2639
+
2640
+
2641
+ def _find_dtype(
2642
+ model_id: str,
2643
+ config: PretrainedConfig,
2644
+ *,
2645
+ revision: Optional[str],
2646
+ ):
2647
+ # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
2648
+ # because config.torch_dtype can be None.
2649
+ config_dtype = getattr(config, "torch_dtype", None)
2650
+
2651
+ # Fallbacks for multi-modal models if the root config
2652
+ # does not define torch_dtype
2653
+ if config_dtype is None:
2654
+ config_dtype = getattr(config.get_text_config(), "torch_dtype", None)
2655
+ if config_dtype is None and hasattr(config, "vision_config"):
2656
+ config_dtype = getattr(config.vision_config, "torch_dtype", None)
2657
+ if config_dtype is None and hasattr(config, "encoder_config"):
2658
+ config_dtype = getattr(config.encoder_config, "torch_dtype", None)
2659
+
2660
+ # Try to read the dtype of the weights if they are in safetensors format
2661
+ if config_dtype is None:
2662
+ repo_mt = try_get_safetensors_metadata(model_id, revision=revision)
2663
+
2664
+ if repo_mt and (files_mt := repo_mt.files_metadata):
2665
+ param_dtypes: set[torch.dtype] = {
2666
+ _SAFETENSORS_TO_TORCH_DTYPE[dtype_str]
2667
+ for file_mt in files_mt.values()
2668
+ for dtype_str in file_mt.parameter_count
2669
+ if dtype_str in _SAFETENSORS_TO_TORCH_DTYPE
2670
+ }
2671
+
2672
+ if param_dtypes:
2673
+ return common_broadcastable_dtype(param_dtypes)
2674
+
2675
+ if config_dtype is None:
2676
+ config_dtype = torch.float32
2677
+
2678
+ return config_dtype
2679
+
2680
+
2681
+ def _resolve_auto_dtype(
2682
+ model_type: str,
2683
+ config_dtype: torch.dtype,
2684
+ *,
2685
+ is_pooling_model: bool,
2686
+ ):
2687
+ from vllm.platforms import current_platform
2688
+
2689
+ supported_dtypes = [
2690
+ dtype for dtype in current_platform.supported_dtypes
2691
+ if _is_valid_dtype(model_type, dtype)
2692
+ ]
2693
+
2694
+ if is_pooling_model and torch.float16 in supported_dtypes:
2695
+ preferred_dtype = torch.float16
2696
+ else:
2697
+ preferred_dtype = supported_dtypes[0]
2698
+
2699
+ # Downcast for float32 models
2700
+ if config_dtype == torch.float32:
2701
+ config_dtype = preferred_dtype
2702
+
2703
+ if config_dtype in supported_dtypes:
2704
+ return config_dtype
2705
+
2706
+ # Ensure device compatibility
2707
+ device_name = current_platform.get_device_name()
2708
+ device_capability = current_platform.get_device_capability()
2709
+
2710
+ if device_capability is None:
2711
+ device_str = f"{device_name!r}"
2712
+ else:
2713
+ version_str = device_capability.as_version_str()
2714
+ device_str = f"{device_name!r} (with compute capability {version_str})"
2715
+
2716
+ logger.warning(
2717
+ "Your device %s doesn't support %s. "
2718
+ "Falling back to %s for compatibility.",
2719
+ device_str,
2720
+ config_dtype,
2721
+ preferred_dtype,
2722
+ )
2723
+
2724
+ return preferred_dtype
2725
+
2726
+
2727
+ def _get_and_verify_dtype(
2728
+ model_id: str,
2729
+ config: PretrainedConfig,
2730
+ dtype: Union[str, torch.dtype],
2731
+ *,
2732
+ is_pooling_model: bool,
2733
+ revision: Optional[str] = None,
2734
+ ) -> torch.dtype:
2735
+ config_dtype = _find_dtype(model_id, config, revision=revision)
2736
+ model_type = config.model_type
2737
+
2738
+ if isinstance(dtype, str):
2739
+ dtype = dtype.lower()
2740
+ if dtype == "auto":
2741
+ # Set default dtype from model config
2742
+ torch_dtype = _resolve_auto_dtype(
2743
+ model_type,
2744
+ config_dtype,
2745
+ is_pooling_model=is_pooling_model,
2746
+ )
2747
+ else:
2748
+ if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
2749
+ raise ValueError(f"Unknown dtype: {dtype!r}")
2750
+ torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
2751
+ elif isinstance(dtype, torch.dtype):
2752
+ torch_dtype = dtype
2753
+ else:
2754
+ raise ValueError(f"Unknown dtype: {dtype}")
2755
+
2756
+ _check_valid_dtype(model_type, torch_dtype)
2757
+
2758
+ if torch_dtype != config_dtype:
2759
+ if torch_dtype == torch.float32:
2760
+ # Upcasting to float32 is allowed.
2761
+ logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
2762
+ elif config_dtype == torch.float32:
2763
+ # Downcasting from float32 to float16 or bfloat16 is allowed.
2764
+ logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
2765
+ else:
2766
+ # Casting between float16 and bfloat16 is allowed with a warning.
2767
+ logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
2768
+
2769
+ return torch_dtype
2770
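# Illustrative outcomes for hypothetical inputs: a bfloat16 checkpoint
# requested with dtype="float32" is upcast with an info log; a float32
# checkpoint requested with dtype="half" is downcast with an info log;
# requesting float16 for a bfloat16 checkpoint is allowed but logged as a
# warning, since the cast crosses 16-bit formats.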
+
2771
+
2772
+ def _get_head_dtype(config: PretrainedConfig, dtype: torch.dtype,
2773
+ runner_type: str) -> torch.dtype:
2774
+ head_dtype: Optional[Union[str,
2775
+ torch.dtype]] = getattr(config, "head_dtype",
2776
+ None)
2777
+
2778
+ if head_dtype == "model":
2779
+ return dtype
2780
+ elif isinstance(head_dtype, str):
2781
+ head_dtype = head_dtype.lower()
2782
+ if head_dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
2783
+ raise ValueError(f"Unknown dtype: {head_dtype!r}")
2784
+ return _STR_DTYPE_TO_TORCH_DTYPE[head_dtype]
2785
+ elif isinstance(head_dtype, torch.dtype):
2786
+ return head_dtype
2787
+ elif head_dtype is None:
2788
+ if torch.float32 not in current_platform.supported_dtypes:
2789
+ return dtype
2790
+ if runner_type == "pooling":
2791
+ return torch.float32
2792
+ return dtype
2793
+ else:
2794
+ raise ValueError(f"Unknown dtype: {head_dtype}")
2795
+
2796
+
2797
+ def _get_and_verify_max_len(
2798
+ hf_config: PretrainedConfig,
2799
+ tokenizer_config: Optional[dict],
2800
+ max_model_len: Optional[int],
2801
+ disable_sliding_window: bool,
2802
+ sliding_window: Optional[int],
2803
+ spec_target_max_model_len: Optional[int] = None,
2804
+ encoder_config: Optional[Any] = None,
2805
+ ) -> int:
2806
+ """Get and verify the model's maximum length."""
2807
+ derived_max_model_len = float("inf")
2808
+ possible_keys = [
2809
+ # OPT
2810
+ "max_position_embeddings",
2811
+ # GPT-2
2812
+ "n_positions",
2813
+ # MPT
2814
+ "max_seq_len",
2815
+ # ChatGLM2
2816
+ "seq_length",
2817
+ # Command-R
2818
+ "model_max_length",
2819
+ # Whisper
2820
+ "max_target_positions",
2821
+ # Others
2822
+ "max_sequence_length",
2823
+ "max_seq_length",
2824
+ "seq_len",
2825
+ ]
2826
+ # Choose the smallest "max_length" from the possible keys
2827
+ max_len_key = None
2828
+ for key in possible_keys:
2829
+ max_len = getattr(hf_config, key, None)
2830
+ if max_len is not None:
2831
+ max_len_key = key if max_len < derived_max_model_len \
2832
+ else max_len_key
2833
+ derived_max_model_len = min(derived_max_model_len, max_len)
2834
+ # For Command-R / Cohere, Cohere2 / Aya Vision models
2835
+ if tmp_max_len := getattr(hf_config, "model_max_length", None):
2836
+ max_len_key = "model_max_length"
2837
+ derived_max_model_len = tmp_max_len
2838
+
2839
+ # If sliding window is manually disabled, max_length should be less
2840
+ # than the sliding window length in the model config.
2841
+ if (disable_sliding_window and sliding_window is not None
2842
+ and sliding_window < derived_max_model_len):
2843
+ max_len_key = "sliding_window"
2844
+ derived_max_model_len = sliding_window
2845
+
2846
+ # Consider model_max_length in tokenizer_config
2847
+ if tokenizer_config:
2848
+ tokenizer_model_max_length = tokenizer_config.get(
2849
+ "model_max_length", derived_max_model_len)
2850
+ derived_max_model_len = min(derived_max_model_len,
2851
+ tokenizer_model_max_length)
2852
+
2853
+ # If none of the keys were found in the config, use a default and
2854
+ # log a warning.
2855
+ if derived_max_model_len == float("inf"):
2856
+ if max_model_len is not None:
2857
+ # If max_model_len is specified, we use it.
2858
+ return max_model_len
2859
+
2860
+ if spec_target_max_model_len is not None:
2861
+ # If this is a speculative draft model, we use the max model len
2862
+ # from the target model.
2863
+ return spec_target_max_model_len
2864
+
2865
+ default_max_len = 2048
2866
+ logger.warning(
2867
+ "The model's config.json does not contain any of the following "
2868
+ "keys to determine the original maximum length of the model: "
2869
+ "%s. Assuming the model's maximum length is %d.", possible_keys,
2870
+ default_max_len)
2871
+ derived_max_model_len = default_max_len
2872
+
2873
+ rope_scaling = getattr(hf_config, "rope_scaling", None)
2874
+ # NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
2875
+ # scaling, so we skip applying the scaling factor again.
2876
+ if rope_scaling is not None and "gemma3" not in hf_config.model_type:
2877
+ # No need to consider "type" key because of patch_rope_scaling when
2878
+ # loading HF config
2879
+ rope_type = rope_scaling["rope_type"]
2880
+
2881
+ if rope_type not in ("su", "longrope", "llama3"):
2882
+ if disable_sliding_window:
2883
+ # TODO(robertgshaw): Find a model that supports rope_scaling
2884
+ # with sliding window to see if this case should be allowed.
2885
+ raise NotImplementedError(
2886
+ "Disabling sliding window is not supported for models "
2887
+ "with rope_scaling. Please raise an issue so we can "
2888
+ "investigate.")
2889
+
2890
+ # NOTE: rope_type == "default" does not define factor
2891
+ # https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
2892
+ scaling_factor = rope_scaling.get("factor", 1.0)
2893
+
2894
+ if rope_type == "yarn":
2895
+ derived_max_model_len = rope_scaling[
2896
+ "original_max_position_embeddings"]
2897
+ derived_max_model_len *= scaling_factor
2898
+
2899
+ if encoder_config and "max_seq_length" in encoder_config:
2900
+ derived_max_model_len = encoder_config["max_seq_length"]
2901
+
2902
+ # If the user specified a max length, make sure it is smaller than the
2903
+ # derived length from the HF model config.
2904
+ if max_model_len is None:
2905
+ max_model_len = int(derived_max_model_len)
2906
+ if current_platform.is_tpu():
2907
+ logger.warning(
2908
+ "--max-model-len is not specified, "
2909
+ "it's currently using model's default length %s, "
2910
+ "which might be too large."
2911
+ "Please input with --max-model-len based on your "
2912
+ "request input length and output length, to avoid "
2913
+ "unnecessary degradation.", max_model_len)
2914
+ elif max_model_len > derived_max_model_len:
2915
+ # Some models might have a separate key for specifying model_max_length
2916
+ # that will be bigger than derived_max_model_len. We compare user input
2917
+ # with model_max_length and allow this override when it's smaller.
2918
+ model_max_length = getattr(hf_config, "model_max_length", None)
2919
+ if model_max_length is not None and max_model_len <= model_max_length:
2920
+ if disable_sliding_window:
2921
+ # TODO(robertgshaw): Find a model that has model_max_length
2922
+ # with sliding window to see if this case should be allowed.
2923
+ raise NotImplementedError(
2924
+ "Disabling sliding window is not supported for models "
2925
+ "model_max_length in the config. Please raise an issue "
2926
+ "so we can investigate.")
2927
+ else:
2928
+ msg = (
2929
+ f"User-specified max_model_len ({max_model_len}) is greater "
2930
+ f"than the derived max_model_len ({max_len_key}="
2931
+ f"{derived_max_model_len} or model_max_length="
2932
+ f"{model_max_length} in model's config.json).")
2933
+ warning = (
2934
+ "VLLM_ALLOW_LONG_MAX_MODEL_LEN must be used with extreme "
2935
+ "caution. If the model uses relative position encoding (RoPE), "
2936
+ "positions exceeding derived_max_model_len lead to nan. If the "
2937
+ "model uses absolute position encoding, positions exceeding "
2938
+ "derived_max_model_len will cause a CUDA array out-of-bounds "
2939
+ "error.")
2940
+ if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
2941
+ logger.warning_once("%s %s", msg, warning)
2942
+ else:
2943
+ raise ValueError(
2944
+ f"{msg} To allow overriding this maximum, set "
2945
+ f"the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1. {warning}")
2946
+ return int(max_model_len)
2947
+
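+ # A small standalone sketch of the key scan above, assuming a toy config dict
+ # that defines two of the possible keys: the smallest positional limit wins
+ # and becomes the derived maximum length.
+ def _example_smallest_positional_limit() -> int:
+     toy_config = {"max_position_embeddings": 32768, "seq_length": 8192}
+     derived = float("inf")
+     for key in ("max_position_embeddings", "n_positions", "seq_length"):
+         value = toy_config.get(key)
+         if value is not None:
+             derived = min(derived, value)
+     return int(derived)  # 8192
+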
2948
+
2949
+ def get_served_model_name(model: str,
2950
+ served_model_name: Optional[Union[str, list[str]]]):
2951
+ """
2952
+ If the input is a non-empty list, the first model_name in
2953
+ `served_model_name` is taken.
2954
+ If the input is a non-empty string, it is used directly.
2955
+ For cases where the input is either an empty string or an
2956
+ empty list, the fallback is to use the `model` argument.
2957
+ """
2958
+ if not served_model_name:
2959
+ return model
2960
+ if isinstance(served_model_name, list):
2961
+ return served_model_name[0]
2962
+ return served_model_name
2963
+
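+ # A minimal usage sketch of the fallback rules above, with hypothetical model
+ # names; it is not executed on import, the asserts just spell out the
+ # precedence described in the docstring.
+ def _example_served_model_name() -> None:
+     assert get_served_model_name("facebook/opt-125m", None) == "facebook/opt-125m"
+     assert get_served_model_name("facebook/opt-125m", "opt-alias") == "opt-alias"
+     assert get_served_model_name("facebook/opt-125m",
+                                  ["alias-a", "alias-b"]) == "alias-a"
+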
2964
+
2965
+ GuidedDecodingBackend = Literal["auto", "xgrammar", "guidance", "outlines",
2966
+ "lm-format-enforcer"]
2967
+
2968
+
2969
+ @config
2970
+ @dataclass
2971
+ class DecodingConfig:
2972
+ """Dataclass which contains the decoding strategy of the engine."""
2973
+
2974
+ backend: GuidedDecodingBackend = "auto"
2975
+ """Which engine will be used for guided decoding (JSON schema / regex etc)
2976
+ by default. With "auto", we will make opinionated choices based on request
2977
+ contents and what the backend libraries currently support, so the behavior
2978
+ is subject to change in each release."""
2979
+
2980
+ disable_fallback: bool = False
2981
+ """If `True`, vLLM will not fallback to a different backend on error."""
2982
+
2983
+ disable_any_whitespace: bool = False
2984
+ """If `True`, the model will not generate any whitespace during guided
2985
+ decoding. This is only supported for xgrammar and guidance backends."""
2986
+
2987
+ disable_additional_properties: bool = False
2988
+ """If `True`, the `guidance` backend will not use `additionalProperties`
2989
+ in the JSON schema. This is only supported for the `guidance` backend and
2990
+ is used to better align its behaviour with `outlines` and `xgrammar`."""
2991
+
2992
+ reasoning_backend: str = ""
2993
+ """Select the reasoning parser depending on the model that you're using.
2994
+ This is used to parse the reasoning content into OpenAI API format."""
2995
+
2996
+ def compute_hash(self) -> str:
2997
+ """
2998
+ WARNING: Whenever a new field is added to this config,
2999
+ ensure that it is included in the factors list if
3000
+ it affects the computation graph.
3001
+
3002
+ Provide a hash that uniquely identifies all the configs
3003
+ that affect the structure of the computation
3004
+ graph from input ids/embeddings to the final hidden states,
3005
+ excluding anything before input ids/embeddings and after
3006
+ the final hidden states.
3007
+ """
3008
+ # no factors to consider.
3009
+ # this config will not affect the computation graph.
3010
+ factors: list[Any] = []
3011
+ hash_str = hashlib.md5(str(factors).encode(),
3012
+ usedforsecurity=False).hexdigest()
3013
+ return hash_str
3014
+
3015
+ def __post_init__(self):
3016
+ if (self.disable_any_whitespace
3017
+ and self.backend not in ("xgrammar", "guidance")):
3018
+ raise ValueError("disable_any_whitespace is only supported for "
3019
+ "xgrammar and guidance backends.")
3020
+ if (self.disable_additional_properties and self.backend != "guidance"):
3021
+ raise ValueError("disable_additional_properties is only supported "
3022
+ "for the guidance backend.")
3023
+
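+ # A short sketch of the __post_init__ constraint above, using hypothetical
+ # settings: disable_any_whitespace pairs only with the xgrammar/guidance
+ # backends, so combining it with "outlines" is rejected.
+ def _example_decoding_config_validation() -> None:
+     DecodingConfig(backend="xgrammar", disable_any_whitespace=True)  # accepted
+     try:
+         DecodingConfig(backend="outlines", disable_any_whitespace=True)
+     except ValueError:
+         pass  # rejected, as documented above
+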
3024
+
3025
+ DetailedTraceModules = Literal["model", "worker", "all"]
3026
+
3027
+
3028
+ @config
3029
+ @dataclass
3030
+ class ObservabilityConfig:
3031
+ """Configuration for observability - metrics and tracing."""
3032
+
3033
+ show_hidden_metrics_for_version: Optional[str] = None
3034
+ """Enable deprecated Prometheus metrics that have been hidden since the
3035
+ specified version. For example, if a previously deprecated metric has been
3036
+ hidden since the v0.7.0 release, you can use
3037
+ `--show-hidden-metrics-for-version=0.7` as a temporary escape hatch while
3038
+ you migrate to new metrics. The metric is likely to be removed completely
3039
+ in an upcoming release."""
3040
+
3041
+ @cached_property
3042
+ def show_hidden_metrics(self) -> bool:
3043
+ """Check if the hidden metrics should be shown."""
3044
+ if self.show_hidden_metrics_for_version is None:
3045
+ return False
3046
+ return version._prev_minor_version_was(
3047
+ self.show_hidden_metrics_for_version)
3048
+
3049
+ otlp_traces_endpoint: Optional[str] = None
3050
+ """Target URL to which OpenTelemetry traces will be sent."""
3051
+
3052
+ collect_detailed_traces: Optional[list[DetailedTraceModules]] = None
3053
+ """It makes sense to set this only if `--otlp-traces-endpoint` is set. If
3054
+ set, it will collect detailed traces for the specified modules. This
3055
+ involves the use of possibly costly and/or blocking operations and hence might
3056
+ have a performance impact.
3057
+
3058
+ Note that collecting detailed timing information for each request can be
3059
+ expensive."""
3060
+
3061
+ @cached_property
3062
+ def collect_model_forward_time(self) -> bool:
3063
+ """Whether to collect model forward time for the request."""
3064
+ return (self.collect_detailed_traces is not None
3065
+ and ("model" in self.collect_detailed_traces
3066
+ or "all" in self.collect_detailed_traces))
3067
+
3068
+ @cached_property
3069
+ def collect_model_execute_time(self) -> bool:
3070
+ """Whether to collect model execute time for the request."""
3071
+ return (self.collect_detailed_traces is not None
3072
+ and ("worker" in self.collect_detailed_traces
3073
+ or "all" in self.collect_detailed_traces))
3074
+
3075
+ def compute_hash(self) -> str:
3076
+ """
3077
+ WARNING: Whenever a new field is added to this config,
3078
+ ensure that it is included in the factors list if
3079
+ it affects the computation graph.
3080
+
3081
+ Provide a hash that uniquely identifies all the configs
3082
+ that affect the structure of the computation
3083
+ graph from input ids/embeddings to the final hidden states,
3084
+ excluding anything before input ids/embeddings and after
3085
+ the final hidden states.
3086
+ """
3087
+ # no factors to consider.
3088
+ # this config will not affect the computation graph.
3089
+ factors: list[Any] = []
3090
+ hash_str = hashlib.md5(str(factors).encode(),
3091
+ usedforsecurity=False).hexdigest()
3092
+ return hash_str
3093
+
3094
+ def __post_init__(self):
3095
+ if (self.collect_detailed_traces is not None
3096
+ and len(self.collect_detailed_traces) == 1
3097
+ and "," in self.collect_detailed_traces[0]):
3098
+ self._parse_collect_detailed_traces()
3099
+
3100
+ from vllm.tracing import is_otel_available, otel_import_error_traceback
3101
+ if not is_otel_available() and self.otlp_traces_endpoint is not None:
3102
+ raise ValueError(
3103
+ "OpenTelemetry is not available. Unable to configure "
3104
+ "'otlp_traces_endpoint'. Ensure OpenTelemetry packages are "
3105
+ f"installed. Original error:\n{otel_import_error_traceback}")
3106
+
3107
+ def _parse_collect_detailed_traces(self):
3108
+ assert isinstance(self.collect_detailed_traces, list)
3109
+ self.collect_detailed_traces = cast(
3110
+ list[DetailedTraceModules],
3111
+ self.collect_detailed_traces[0].split(","))
3112
+
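+ # A minimal sketch of how collect_detailed_traces drives the two cached
+ # properties above (hypothetical values; "all" implies both).
+ def _example_observability_config() -> None:
+     cfg = ObservabilityConfig(collect_detailed_traces=["model", "worker"])
+     assert cfg.collect_model_forward_time and cfg.collect_model_execute_time
+     cfg_all = ObservabilityConfig(collect_detailed_traces=["all"])
+     assert cfg_all.collect_model_forward_time
+     assert cfg_all.collect_model_execute_time
+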
3113
+
3114
+ @config
3115
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
3116
+ class VllmConfig:
3117
+ """Dataclass which contains all vllm-related configuration. This
3118
+ simplifies passing around the distinct configurations in the codebase.
3119
+ """
3120
+
3121
+ # TODO: use default_factory once default constructing ModelConfig doesn't
3122
+ # try to download a model
3123
+ model_config: ModelConfig = None # type: ignore
3124
+ """Model configuration."""
3125
+ cache_config: CacheConfig = field(default_factory=CacheConfig)
3126
+ """Cache configuration."""
3127
+ parallel_config: ParallelConfig = field(default_factory=ParallelConfig)
3128
+ """Parallel configuration."""
3129
+ scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig)
3130
+ """Scheduler configuration."""
3131
+ device_config: DeviceConfig = field(default_factory=DeviceConfig)
3132
+ """Device configuration."""
3133
+ load_config: LoadConfig = field(default_factory=LoadConfig)
3134
+ """Load configuration."""
3135
+ lora_config: Optional[LoRAConfig] = None
3136
+ """LoRA configuration."""
3137
+ speculative_config: Optional[SpeculativeConfig] = None
3138
+ """Speculative decoding configuration."""
3139
+ decoding_config: DecodingConfig = field(default_factory=DecodingConfig)
3140
+ """Decoding configuration."""
3141
+ observability_config: Optional[ObservabilityConfig] = None
3142
+ """Observability configuration."""
3143
+ quant_config: Optional[QuantizationConfig] = None
3144
+ """Quantization configuration."""
3145
+ compilation_config: CompilationConfig = field(
3146
+ default_factory=CompilationConfig)
3147
+ """`torch.compile` and cudagraph capture configuration for the model.
3148
+
3149
+ As a shorthand, `-O<n>` can be used to directly specify the compilation
3150
+ level `n`: `-O3` is equivalent to `-O.level=3` (same as `-O='{"level":3}'`).
3151
+ Currently, -O <n> and -O=<n> are supported as well but this will likely be
3152
+ removed in favor of clearer -O<n> syntax in the future.
3153
+
3154
+ NOTE: level 0 is the default level without any optimization. level 1 and 2
3155
+ are for internal testing only. level 3 is the recommended level for
3156
+ production, also default in V1.
3157
+
3158
+ You can specify the full compilation config like so:
3159
+ `{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}`
3160
+ """
3161
+ kv_transfer_config: Optional[KVTransferConfig] = None
3162
+ """The configurations for distributed KV cache transfer."""
3163
+ kv_events_config: Optional[KVEventsConfig] = None
3164
+ """The configurations for event publishing."""
3165
+ # some opaque config, only used to provide additional information
3166
+ # for the hash computation, mainly used for testing, debugging or out of
3167
+ # tree config registration.
3168
+ additional_config: Union[dict, SupportsHash] = field(default_factory=dict)
3169
+ """Additional config for specified platform. Different platforms may
3170
+ support different configs. Make sure the configs are valid for the platform
3171
+ you are using. Contents must be hashable."""
3172
+ instance_id: str = ""
3173
+ """The ID of the vLLM instance."""
3174
+
3175
+ def compute_hash(self) -> str:
3176
+ """
3177
+ WARNING: Whenever a new field is added to this config,
3178
+ ensure that it is included in the factors list if
3179
+ it affects the computation graph.
3180
+
3181
+ Provide a hash that uniquely identifies all the configs
3182
+ that affect the structure of the computation
3183
+ graph from input ids/embeddings to the final hidden states,
3184
+ excluding anything before input ids/embeddings and after
3185
+ the final hidden states.
3186
+ """
3187
+ factors: list[Any] = []
3188
+
3189
+ # summarize vllm config
3190
+ vllm_factors: list[Any] = []
3191
+ from vllm import __version__
3192
+ vllm_factors.append(__version__)
3193
+ vllm_factors.append(envs.VLLM_USE_V1)
3194
+ if self.model_config:
3195
+ vllm_factors.append(self.model_config.compute_hash())
3196
+ else:
3197
+ vllm_factors.append("None")
3198
+ if self.cache_config:
3199
+ vllm_factors.append(self.cache_config.compute_hash())
3200
+ else:
3201
+ vllm_factors.append("None")
3202
+ if self.parallel_config:
3203
+ vllm_factors.append(self.parallel_config.compute_hash())
3204
+ else:
3205
+ vllm_factors.append("None")
3206
+ if self.scheduler_config:
3207
+ vllm_factors.append(self.scheduler_config.compute_hash())
3208
+ else:
3209
+ vllm_factors.append("None")
3210
+ if self.device_config:
3211
+ vllm_factors.append(self.device_config.compute_hash())
3212
+ else:
3213
+ vllm_factors.append("None")
3214
+ if self.load_config:
3215
+ vllm_factors.append(self.load_config.compute_hash())
3216
+ else:
3217
+ vllm_factors.append("None")
3218
+ if self.lora_config:
3219
+ vllm_factors.append(self.lora_config.compute_hash())
3220
+ # LoRA creates static buffers based on max_num_batched_tokens.
3221
+ # The tensor sizes and strides get captured in the torch.compile
3222
+ # graph explicitly.
3223
+ vllm_factors.append(
3224
+ str(self.scheduler_config.max_num_batched_tokens))
3225
+ else:
3226
+ vllm_factors.append("None")
3227
+ if self.speculative_config:
3228
+ vllm_factors.append(self.speculative_config.compute_hash())
3229
+ else:
3230
+ vllm_factors.append("None")
3231
+ if self.decoding_config:
3232
+ vllm_factors.append(self.decoding_config.compute_hash())
3233
+ else:
3234
+ vllm_factors.append("None")
3235
+ if self.observability_config:
3236
+ vllm_factors.append(self.observability_config.compute_hash())
3237
+ else:
3238
+ vllm_factors.append("None")
3239
+ if self.quant_config:
3240
+ pass # should be captured by model_config.quantization
3241
+ if self.compilation_config:
3242
+ vllm_factors.append(self.compilation_config.compute_hash())
3243
+ else:
3244
+ vllm_factors.append("None")
3245
+ if self.kv_transfer_config:
3246
+ vllm_factors.append(self.kv_transfer_config.compute_hash())
3247
+ else:
3248
+ vllm_factors.append("None")
3249
+ if self.additional_config:
3250
+ if isinstance(additional_config := self.additional_config, dict):
3251
+ additional_config_hash = hashlib.md5(
3252
+ json.dumps(additional_config, sort_keys=True).encode(),
3253
+ usedforsecurity=False,
3254
+ ).hexdigest()
3255
+ else:
3256
+ additional_config_hash = additional_config.compute_hash()
3257
+ vllm_factors.append(additional_config_hash)
3258
+ else:
3259
+ vllm_factors.append("None")
3260
+ factors.append(vllm_factors)
3261
+
3262
+ hash_str = hashlib.md5(str(factors).encode(),
3263
+ usedforsecurity=False).hexdigest()[:10]
3264
+ return hash_str
3265
+
3266
+ def pad_for_cudagraph(self, batch_size: int) -> int:
3267
+ # if batch_size > self.compilation_config.max_capture_size,
3268
+ # it should raise an IndexError.
3269
+ # the caller should make sure the batch_size is within the range,
3270
+ # i.e., batch_size <= self.compilation_config.max_capture_size
3271
+ return self.compilation_config.bs_to_padded_graph_size[batch_size]
3272
+
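+ # A small illustrative sketch of the padding table behind the lookup above,
+ # assuming capture sizes of [1, 2, 4, 8] (the real sizes come from
+ # compilation_config): every batch size pads up to the next captured size,
+ # and anything beyond the largest size is out of range for the table.
+ @staticmethod
+ def _example_padding_table() -> list[int]:
+     capture_sizes = [1, 2, 4, 8]
+     table = [next(s for s in capture_sizes if s >= bs)
+              for bs in range(capture_sizes[-1] + 1)]
+     # table[3] == 4, table[8] == 8; table[9] would raise IndexError.
+     return table
+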
3273
+ @staticmethod
3274
+ def _get_quantization_config(
3275
+ model_config: ModelConfig,
3276
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
3277
+ """Get the quantization config."""
3278
+ from vllm.platforms import current_platform
3279
+ if model_config.quantization is not None:
3280
+ from vllm.model_executor.model_loader.weight_utils import (
3281
+ get_quant_config)
3282
+ quant_config = get_quant_config(model_config, load_config)
3283
+ capability_tuple = current_platform.get_device_capability()
3284
+
3285
+ if capability_tuple is not None:
3286
+ capability = capability_tuple.to_int()
3287
+ if capability < quant_config.get_min_capability():
3288
+ raise ValueError(
3289
+ f"The quantization method {model_config.quantization} "
3290
+ "is not supported for the current GPU. Minimum "
3291
+ f"capability: {quant_config.get_min_capability()}. "
3292
+ f"Current capability: {capability}.")
3293
+ supported_dtypes = quant_config.get_supported_act_dtypes()
3294
+ if model_config.dtype not in supported_dtypes:
3295
+ raise ValueError(
3296
+ f"{model_config.dtype} is not supported for quantization "
3297
+ f"method {model_config.quantization}. Supported dtypes: "
3298
+ f"{supported_dtypes}")
3299
+ return quant_config
3300
+ return None
3301
+
3302
+ @staticmethod
3303
+ def get_quantization_config(
3304
+ model_config: ModelConfig,
3305
+ load_config: LoadConfig) -> Optional[QuantizationConfig]:
3306
+ import copy
3307
+
3308
+ # For some reason, the _ version of this modifies the model_config
3309
+ # object, so we use deepcopy to avoid this problem.
3310
+ return VllmConfig._get_quantization_config(copy.deepcopy(model_config),
3311
+ load_config)
3312
+
3313
+ def with_hf_config(
3314
+ self,
3315
+ hf_config: PretrainedConfig,
3316
+ architectures: Optional[list[str]] = None,
3317
+ ) -> "VllmConfig":
3318
+ if architectures is not None:
3319
+ hf_config = copy.deepcopy(hf_config)
3320
+ hf_config.architectures = architectures
3321
+
3322
+ model_config = copy.deepcopy(self.model_config)
3323
+ model_config.hf_config = hf_config
3324
+
3325
+ return replace(self, model_config=model_config)
3326
+
3327
+ def __post_init__(self):
3328
+ """Verify configs are valid & consistent with each other.
3329
+ """
3330
+
3331
+ self.try_verify_and_update_config()
3332
+
3333
+ if self.model_config is not None:
3334
+ self.model_config.verify_async_output_proc(self.parallel_config,
3335
+ self.speculative_config,
3336
+ self.device_config)
3337
+ self.model_config.verify_with_parallel_config(self.parallel_config)
3338
+ self.model_config.verify_dual_chunk_attention_config(
3339
+ self.load_config)
3340
+
3341
+ self.cache_config.verify_with_parallel_config(self.parallel_config)
3342
+
3343
+ if self.lora_config is not None:
3344
+ self.lora_config.verify_with_cache_config(self.cache_config)
3345
+ self.lora_config.verify_with_model_config(self.model_config)
3346
+
3347
+ if self.quant_config is None and self.model_config is not None:
3348
+ self.quant_config = VllmConfig._get_quantization_config(
3349
+ self.model_config, self.load_config)
3350
+
3351
+ from vllm.platforms import current_platform
3352
+ if self.model_config is not None and \
3353
+ self.scheduler_config.chunked_prefill_enabled and \
3354
+ self.model_config.dtype == torch.float32 and \
3355
+ current_platform.get_device_capability() == (7, 5):
3356
+ logger.warning_once(
3357
+ "Turing devices tensor cores do not support float32 matmul. "
3358
+ "To workaround this limitation, vLLM will set 'ieee' input "
3359
+ "precision for chunked prefill triton kernels.")
3360
+
3361
+ # If the user does not explicitly set a compilation level, then
3362
+ # we use the default level. The default level depends on other
3363
+ # settings (see the below code).
3364
+ if self.compilation_config.level is None:
3365
+ if envs.VLLM_USE_V1:
3366
+ if (self.model_config is not None
3367
+ and not self.model_config.enforce_eager):
3368
+ self.compilation_config.level = CompilationLevel.PIECEWISE
3369
+ else:
3370
+ self.compilation_config.level = \
3371
+ CompilationLevel.NO_COMPILATION
3372
+
3373
+ else:
3374
+ # NB: Passing both --enforce-eager and a compilation level
3375
+ # in V0 means the compilation level wins out.
3376
+ self.compilation_config.level = CompilationLevel.NO_COMPILATION
3377
+
3378
+ # async tp is built on top of sequence parallelism
3379
+ # and requires it to be enabled.
3380
+ if self.compilation_config.pass_config.enable_async_tp:
3381
+ self.compilation_config.pass_config.enable_sequence_parallelism = \
3382
+ True
3383
+ if self.compilation_config.pass_config.enable_sequence_parallelism:
3384
+ self.compilation_config.custom_ops.append("+rms_norm")
3385
+
3386
+ if current_platform.is_cuda_alike() or current_platform.is_xpu():
3387
+ # if cudagraph_mode is not explicitly set by users, set default
3388
+ # value
3389
+ if self.compilation_config.cudagraph_mode is None:
3390
+ if envs.VLLM_USE_V1 and self.compilation_config.level \
3391
+ == CompilationLevel.PIECEWISE:
3392
+ self.compilation_config.cudagraph_mode = \
3393
+ CUDAGraphMode.PIECEWISE
3394
+ else:
3395
+ self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE
3396
+
3397
+ # disable cudagraph when enforce eager execution
3398
+ if self.model_config is not None and \
3399
+ self.model_config.enforce_eager:
3400
+ logger.info("Cudagraph is disabled under eager mode")
3401
+ self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE
3402
+ elif envs.VLLM_USE_V1:
3403
+ self.compilation_config.cudagraph_num_of_warmups = 1
3404
+
3405
+ self._set_cudagraph_sizes()
3406
+ else:
3407
+ self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE
3408
+
3409
+ if self.cache_config.cpu_offload_gb > 0 and \
3410
+ self.compilation_config.level != CompilationLevel.NO_COMPILATION \
3411
+ and not envs.VLLM_USE_V1:
3412
+ logger.warning(
3413
+ "CPU offload is not supported with `torch.compile` in v0 yet."
3414
+ " Disabling `torch.compile`.")
3415
+ self.compilation_config.level = CompilationLevel.NO_COMPILATION
3416
+
3417
+ if self.cache_config.kv_sharing_fast_prefill:
3418
+ if not envs.VLLM_USE_V1:
3419
+ raise NotImplementedError(
3420
+ "Fast prefill optimization for KV sharing is not supported "
3421
+ "in V0 currently.")
3422
+
3423
+ if self.speculative_config is not None and \
3424
+ self.speculative_config.use_eagle():
3425
+ raise NotImplementedError(
3426
+ "Fast prefill optimization for KV sharing is not "
3427
+ "compatible with EAGLE as EAGLE requires correct logits "
3428
+ "for all tokens while fast prefill gives incorrect logits "
3429
+ "for prompt tokens.")
3430
+
3431
+ logger.warning_once(
3432
+ "--kv-sharing-fast-prefill requires changes on model side for "
3433
+ "correctness and to realize prefill savings. ")
3434
+
3435
+ if ((not envs.VLLM_USE_V1) and self.lora_config is not None
3436
+ and self.compilation_config.level
3437
+ != CompilationLevel.NO_COMPILATION):
3438
+ logger.warning(
3439
+ "LoRA for V0 is not supported with `torch.compile` yet. "
3440
+ "Disabling `torch.compile`.")
3441
+ self.compilation_config.level = CompilationLevel.NO_COMPILATION
3442
+
3443
+ disable_chunked_prefill_reasons: list[str] = []
3444
+
3445
+ if self.model_config:
3446
+ if self.model_config.pooler_config:
3447
+ pooling_type = self.model_config.pooler_config.pooling_type
3448
+ if pooling_type is None or pooling_type.lower() != "last":
3449
+ disable_chunked_prefill_reasons.append(
3450
+ "Only \"last\" pooling supports chunked "
3451
+ "prefill and prefix caching; disabling both.")
3452
+ if not getattr(self.model_config.hf_config, "is_causal", True):
3453
+ disable_chunked_prefill_reasons.append(
3454
+ "Only models using causal attention supports chunked "
3455
+ "prefill and prefix caching; disabling both.")
3456
+ elif self.model_config.is_encoder_decoder:
3457
+ self.scheduler_config.max_num_encoder_input_tokens = \
3458
+ MULTIMODAL_REGISTRY.get_encdec_max_encoder_len(self.model_config)
3459
+ logger.debug(
3460
+ "Encoder-decoder model detected: setting "
3461
+ "`max_num_encoder_input_tokens` to encoder length (%s)",
3462
+ self.scheduler_config.max_num_encoder_input_tokens)
3463
+ self.scheduler_config.disable_chunked_mm_input = True
3464
+ disable_chunked_prefill_reasons.append(
3465
+ "Encoder-decoder models do not support chunked prefill nor"
3466
+ " prefix caching; disabling both.")
3467
+ if (self.model_config.architecture
3468
+ == "WhisperForConditionalGeneration"
3469
+ and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD")
3470
+ != "spawn"):
3471
+ logger.warning(
3472
+ "Whisper is known to have issues with "
3473
+ "forked workers. If startup is hanging, "
3474
+ "try setting 'VLLM_WORKER_MULTIPROC_METHOD' "
3475
+ "to 'spawn'.")
3476
+
3477
+ if disable_chunked_prefill_reasons:
3478
+ for reason in disable_chunked_prefill_reasons:
3479
+ logger.info(reason)
3480
+ self.scheduler_config.chunked_prefill_enabled = False
3481
+ self.scheduler_config.long_prefill_token_threshold = 0
3482
+
3483
+ if self.cache_config is not None:
3484
+ self.cache_config.enable_prefix_caching = False
3485
+
3486
+ if (self.kv_events_config is not None
3487
+ and self.kv_events_config.enable_kv_cache_events
3488
+ and not self.cache_config.enable_prefix_caching):
3489
+ logger.warning(
3490
+ "KV cache events are on, but prefix caching is not enabled."
3491
+ "Use --enable-prefix-caching to enable.")
3492
+ if (self.kv_events_config is not None
3493
+ and self.kv_events_config.publisher != "null"
3494
+ and not self.kv_events_config.enable_kv_cache_events):
3495
+ logger.warning("KV cache events are disabled,"
3496
+ "but the scheduler is configured to publish them."
3497
+ "Modify KVEventsConfig.enable_kv_cache_events"
3498
+ "to True to enable.")
3499
+ current_platform.check_and_update_config(self)
3500
+
3501
+ # final check of cudagraph mode after platform-specific update
3502
+ if envs.VLLM_USE_V1 and current_platform.is_cuda_alike():
3503
+ if self.compilation_config.cudagraph_mode == CUDAGraphMode.FULL \
3504
+ and self.model_config is not None and \
3505
+ not self.model_config.disable_cascade_attn:
3506
+ logger.info("CUDAGraphMode.FULL is not supported with "
3507
+ "cascade attention currently. Disabling cascade"
3508
+ "attention.")
3509
+ self.model_config.disable_cascade_attn = True
3510
+
3511
+ if self.compilation_config.cudagraph_mode\
3512
+ .requires_piecewise_compilation():
3513
+ assert self.compilation_config.level == \
3514
+ CompilationLevel.PIECEWISE, \
3515
+ "Compilation level should be CompilationLevel.PIECEWISE "\
3516
+ "when cudagraph_mode piecewise cudagraphs is used, "\
3517
+ f"cudagraph_mode={self.compilation_config.cudagraph_mode}"
3518
+
3519
+ if not self.instance_id:
3520
+ self.instance_id = random_uuid()[:5]
3521
+
3522
+ # Do this after all the updates to compilation_config.level
3523
+ if envs.VLLM_USE_V1 and \
3524
+ self.compilation_config.level == CompilationLevel.PIECEWISE:
3525
+ self.compilation_config.set_splitting_ops_for_v1()
3526
+
3527
+ if (envs.VLLM_USE_V1
3528
+ and not self.scheduler_config.disable_hybrid_kv_cache_manager):
3529
+ # The warning should only be printed for hybrid models. Since we
3530
+ # can't know yet whether the model is hybrid, we don't log the
3531
+ # warning message here and will log it later.
3532
+ if not current_platform.support_hybrid_kv_cache():
3533
+ # Hybrid KV cache manager is not supported on non-GPU platforms.
3534
+ self.scheduler_config.disable_hybrid_kv_cache_manager = True
3535
+ if self.kv_transfer_config is not None:
3536
+ # Hybrid KV cache manager is not compatible with KV transfer.
3537
+ self.scheduler_config.disable_hybrid_kv_cache_manager = True
3538
+ if self.kv_events_config is not None:
3539
+ # Hybrid KV cache manager is not compatible with KV events.
3540
+ self.scheduler_config.disable_hybrid_kv_cache_manager = True
3541
+ if self.model_config is not None and \
3542
+ self.model_config.attention_chunk_size is not None:
3543
+ if self.speculative_config is not None and \
3544
+ self.speculative_config.use_eagle():
3545
+ # Hybrid KV cache manager is not yet supported with chunked
3546
+ # local attention + eagle.
3547
+ self.scheduler_config.disable_hybrid_kv_cache_manager = True
3548
+ elif \
3549
+ not envs.VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE:
3550
+ logger.warning(
3551
+ "There is a latency regression when using chunked local"
3552
+ " attention with the hybrid KV cache manager. Disabling"
3553
+ " it, by default. To enable it, set the environment "
3554
+ "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE=1."
3555
+ )
3556
+ # Hybrid KV cache manager is not yet supported with chunked
3557
+ # local attention.
3558
+ self.scheduler_config.disable_hybrid_kv_cache_manager = True
3559
+
3560
+ def update_sizes_for_sequence_parallelism(self,
3561
+ possible_sizes: list) -> list:
3562
+ # remove the sizes that are not a multiple of tp_size when
3563
+ # sequence parallelism is enabled
3564
+ removed_sizes = [
3565
+ size for size in possible_sizes
3566
+ if size % self.parallel_config.tensor_parallel_size != 0
3567
+ ]
3568
+ if removed_sizes:
3569
+ logger.warning(
3570
+ "Batch sizes %s are removed because they are not "
3571
+ "multiple of tp_size %d when "
3572
+ "sequence parallelism is enabled", removed_sizes,
3573
+ self.parallel_config.tensor_parallel_size)
3574
+
3575
+ return [
3576
+ size for size in possible_sizes
3577
+ if size % self.parallel_config.tensor_parallel_size == 0
3578
+ ]
3579
+
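+ # An illustrative sketch of the filtering above, assuming tp_size=4 and a
+ # hypothetical candidate list: only multiples of tp_size survive.
+ @staticmethod
+ def _example_sequence_parallel_filter() -> list[int]:
+     tp_size = 4
+     possible_sizes = [1, 2, 4, 8, 12, 16]
+     kept = [size for size in possible_sizes if size % tp_size == 0]
+     # kept == [4, 8, 12, 16]; [1, 2] would be logged as removed.
+     return kept
+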
3580
+ def _set_cudagraph_sizes(self):
3581
+ """
3582
+ vLLM defines the default candidate list of batch sizes for CUDA graph
3583
+ capture as:
3584
+
3585
+ ```python
3586
+ max_graph_size = min(max_num_seqs * 2, 512)
3587
+ # 1, 2, 4, then multiples of 8 up to max_graph_size
3588
+ cuda_graph_sizes = [1, 2, 4, 8, 16, 24, 32, 40, ..., max_graph_size]
3589
+ ```
+
3590
+ In the end, `vllm_config.compilation_config.cudagraph_capture_sizes`
3591
+ will be the final sizes to capture cudagraph (in descending order).
3592
+
3593
+ These sizes are used to capture and reuse CUDA graphs for
3594
+ performance-critical paths (e.g., decoding). Capturing enables
3595
+ significantly faster kernel dispatch by avoiding Python overhead. The
3596
+ list is then filtered based on `max_num_batched_tokens` (e.g., 8192 on
3597
+ most GPUs), which controls the total allowed number of tokens in a
3598
+ batch. Since each sequence may have a variable number of tokens, the
3599
+ maximum usable batch size will depend on actual sequence lengths.
3600
+
3601
+ Example:
3602
+ With `max_num_batched_tokens = 8192`, and typical sequences
3603
+ averaging ~32 tokens, most practical batch sizes fall below 256.
3604
+ However, the system will still allow capture sizes up to 512 if
3605
+ shape and memory permit.
3606
+
3607
+ Note:
3608
+ If users explicitly specify cudagraph capture sizes in the
3609
+ compilation config, those will override this default logic.
3610
+ At runtime:
3611
+
3612
+ - If batch size <= one of the `cudagraph_capture_sizes`, the closest
3613
+ padded CUDA graph will be used.
3614
+ - If batch size > largest `cudagraph_capture_sizes`, cudagraph will
3615
+ not be used.
3616
+ """
3617
+
3618
+ # calculate the default `batch_size_capture_list`
3619
+ if not envs.VLLM_USE_V1:
3620
+ batch_size_capture_list = []
3621
+ if self.scheduler_config is not None and \
3622
+ self.model_config is not None and \
3623
+ not self.model_config.enforce_eager:
3624
+
3625
+ possible_sizes = [1, 2, 4] + [8 * i for i in range(1, 1025)]
3626
+ if self.parallel_config.tensor_parallel_size > 1 and \
3627
+ self.compilation_config.pass_config.enable_sequence_parallelism:
3628
+ possible_sizes = self.update_sizes_for_sequence_parallelism(
3629
+ possible_sizes)
3630
+
3631
+ # find the minimum size that is larger than max_num_seqs,
3632
+ # which then becomes the max_batchsize_to_capture
3633
+ larger_sizes = [
3634
+ x for x in possible_sizes
3635
+ if x >= self.scheduler_config.max_num_seqs
3636
+ ]
3637
+ if larger_sizes:
3638
+ max_batchsize_to_capture = larger_sizes[0]
3639
+ else:
3640
+ max_batchsize_to_capture = possible_sizes[-1]
3641
+
3642
+ # filter out the sizes that are
3643
+ # larger than max_batchsize_to_capture
3644
+ batch_size_capture_list = [
3645
+ size for size in possible_sizes
3646
+ if size <= max_batchsize_to_capture
3647
+ ]
3648
+ else:
3649
+ batch_size_capture_list = []
3650
+ if self.model_config is not None and \
3651
+ not self.model_config.enforce_eager:
3652
+ cuda_graph_sizes = self.scheduler_config.cuda_graph_sizes
3653
+ if len(cuda_graph_sizes) == 1:
3654
+ batch_size_capture_list = [1, 2, 4] + [
3655
+ i for i in range(8, cuda_graph_sizes[0] + 1, 8)
3656
+ ]
3657
+ elif len(cuda_graph_sizes) > 1:
3658
+ batch_size_capture_list = sorted(cuda_graph_sizes)
3659
+ else:
3660
+ raise TypeError(f"Invalid value for {cuda_graph_sizes=}.")
3661
+ if self.parallel_config.tensor_parallel_size > 1 and \
3662
+ self.compilation_config.pass_config.enable_sequence_parallelism:
3663
+ batch_size_capture_list = \
3664
+ self.update_sizes_for_sequence_parallelism(batch_size_capture_list)
3665
+ max_num_tokens = self.scheduler_config.max_num_batched_tokens
3666
+ batch_size_capture_list = [
3667
+ size for size in batch_size_capture_list
3668
+ if size <= max_num_tokens
3669
+ ]
3670
+
3671
+ self.compilation_config.init_with_cudagraph_sizes(
3672
+ batch_size_capture_list)
3673
+
3674
+ def recalculate_max_model_len(self, max_model_len: int):
3675
+ # Can only be called in try_verify_and_update_config
3676
+ model_config = self.model_config
3677
+ max_model_len = model_config.get_and_verify_max_len(max_model_len)
3678
+ self.model_config.max_model_len = max_model_len
3679
+ self.scheduler_config.max_model_len = max_model_len
3680
+
3681
+ def try_verify_and_update_config(self):
3682
+ if self.model_config is None:
3683
+ return
3684
+
3685
+ # Avoid running try_verify_and_update_config multiple times
3686
+ if getattr(self.model_config, "config_updated", False):
3687
+ return
3688
+ self.model_config.config_updated = True
3689
+
3690
+ architecture = self.model_config.architecture
3691
+ if architecture is None:
3692
+ return
3693
+
3694
+ from vllm.model_executor.models.config import (
3695
+ MODELS_CONFIG_MAP, HybridAttentionMambaModelConfig)
3696
+ cls = MODELS_CONFIG_MAP.get(architecture, None)
3697
+ if cls is not None:
3698
+ cls.verify_and_update_config(self)
3699
+
3700
+ if self.model_config.is_hybrid:
3701
+ HybridAttentionMambaModelConfig.verify_and_update_config(self)
3702
+
3703
+ if self.model_config.convert_type == "classify":
3704
+ # Maybe convert ForCausalLM into ForSequenceClassification model.
3705
+ from vllm.model_executor.models.adapters import (
3706
+ SequenceClassificationConfig)
3707
+ SequenceClassificationConfig.verify_and_update_config(self)
3708
+
3709
+ def __str__(self):
3710
+ return (
3711
+ f"model={self.model_config.model!r}, "
3712
+ f"speculative_config={self.speculative_config!r}, "
3713
+ f"tokenizer={self.model_config.tokenizer!r}, "
3714
+ f"skip_tokenizer_init={self.model_config.skip_tokenizer_init}, "
3715
+ f"tokenizer_mode={self.model_config.tokenizer_mode}, "
3716
+ f"revision={self.model_config.revision}, "
3717
+ f"tokenizer_revision={self.model_config.tokenizer_revision}, "
3718
+ f"trust_remote_code={self.model_config.trust_remote_code}, "
3719
+ f"dtype={self.model_config.dtype}, "
3720
+ f"max_seq_len={self.model_config.max_model_len}, "
3721
+ f"download_dir={self.load_config.download_dir!r}, "
3722
+ f"load_format={self.load_config.load_format}, "
3723
+ f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}, " # noqa
3724
+ f"pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa
3725
+ f"data_parallel_size={self.parallel_config.data_parallel_size}, " # noqa
3726
+ f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa
3727
+ f"quantization={self.model_config.quantization}, "
3728
+ f"enforce_eager={self.model_config.enforce_eager}, "
3729
+ f"kv_cache_dtype={self.cache_config.cache_dtype}, "
3730
+ f"device_config={self.device_config.device}, "
3731
+ f"decoding_config={self.decoding_config!r}, "
3732
+ f"observability_config={self.observability_config!r}, "
3733
+ f"seed={self.model_config.seed}, "
3734
+ f"served_model_name={self.model_config.served_model_name}, "
3735
+ f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, "
3736
+ f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, " # noqa
3737
+ f"use_async_output_proc={self.model_config.use_async_output_proc}, "
3738
+ f"pooler_config={self.model_config.pooler_config!r}, "
3739
+ f"compilation_config={self.compilation_config!r}")
3740
+
3741
+
3742
+ _current_vllm_config: Optional[VllmConfig] = None
3743
+ _current_prefix: Optional[str] = None
3744
+
3745
+
3746
+ @contextmanager
3747
+ def set_current_vllm_config(vllm_config: VllmConfig,
3748
+ check_compile=False,
3749
+ prefix: Optional[str] = None):
3750
+ """
3751
+ Temporarily set the current vLLM config.
3752
+ Used during model initialization.
3753
+ We save the current vLLM config in a global variable,
3754
+ so that all modules can access it, e.g. custom ops
3755
+ can access the vLLM config to determine how to dispatch.
3756
+ """
3757
+ global _current_vllm_config, _current_prefix
3758
+ old_vllm_config = _current_vllm_config
3759
+ old_prefix = _current_prefix
3760
+ from vllm.compilation.counter import compilation_counter
3761
+ num_models_seen = compilation_counter.num_models_seen
3762
+ try:
3763
+ _current_vllm_config = vllm_config
3764
+ _current_prefix = prefix
3765
+ yield
3766
+ except Exception:
3767
+ raise
3768
+ else:
3769
+ logger.debug("enabled custom ops: %s",
3770
+ vllm_config.compilation_config.enabled_custom_ops)
3771
+ logger.debug("disabled custom ops: %s",
3772
+ vllm_config.compilation_config.disabled_custom_ops)
3773
+ if check_compile and \
3774
+ vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \
3775
+ and compilation_counter.num_models_seen == num_models_seen:
3776
+ # If the model supports compilation,
3777
+ # compilation_counter.num_models_seen should be increased
3778
+ # by at least 1.
3779
+ # If it is not increased, it means the model does not support
3780
+ # compilation (does not have @support_torch_compile decorator).
3781
+ logger.warning(
3782
+ "`torch.compile` is turned on, but the model %s"
3783
+ " does not support it. Please open an issue on GitHub"
3784
+ " if you want it to be supported.",
3785
+ vllm_config.model_config.model)
3786
+ finally:
3787
+ _current_vllm_config = old_vllm_config
3788
+ _current_prefix = old_prefix
3789
+ # Clear the compilation config cache when context changes
3790
+ get_cached_compilation_config.cache_clear()
3791
+
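+ # A minimal usage sketch of the context manager above: code running inside
+ # the `with` block (e.g. a model's __init__) can read the active config via
+ # get_current_vllm_config(). Constructing a bare VllmConfig() here is only
+ # for illustration.
+ def _example_set_current_vllm_config() -> None:
+     cfg = VllmConfig()
+     with set_current_vllm_config(cfg, prefix=""):
+         assert get_current_vllm_config() is cfg
+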
3792
+
3793
+ @lru_cache(maxsize=1)
3794
+ def get_cached_compilation_config():
3795
+ """Cache config to avoid repeated calls to get_current_vllm_config()"""
3796
+ return get_current_vllm_config().compilation_config
3797
+
3798
+
3799
+ def get_current_vllm_config() -> VllmConfig:
3800
+ if _current_vllm_config is None:
3801
+ # In CI, usually when we test custom ops/modules directly,
3802
+ # we don't set the vllm config. In that case, we set a default
3803
+ # config.
3804
+ logger.warning("Current vLLM config is not set.")
3805
+ from vllm.config import VllmConfig
3806
+ return VllmConfig()
3807
+ return _current_vllm_config
3808
+
3809
+
3810
+ def get_current_model_prefix() -> str:
3811
+ """
3812
+ Get the prefix of the model that's currently being initialized.
3813
+ """
3814
+ assert _current_prefix is not None, \
3815
+ "Current model prefix is not set. "
3816
+ return _current_prefix
3817
+
3818
+
3819
+ def contains_object_print(text):
3820
+ """
3821
+ Check if the text looks like a printed Python object, e.g.
3822
+ contains any substring matching the pattern: "at 0xFFFFFFF>"
3823
+ We match against 0x followed by 2-16 hex chars (there's
3824
+ a max of 16 on a 64-bit system).
3825
+
3826
+ Args:
3827
+ text (str): The text to check
3828
+
3829
+ Returns:
3830
+ result (bool): `True` if a match is found, `False` otherwise.
3831
+ """
3832
+ pattern = r'at 0x[a-fA-F0-9]{2,16}>'
3833
+ match = re.search(pattern, text)
3834
+ return match is not None
3835
+
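+ # Two illustrative checks of the pattern above: a repr that leaks an object
+ # id matches, a plain printed config does not.
+ def _example_contains_object_print() -> None:
+     assert contains_object_print("<function forward at 0x7f3a2c10>")
+     assert not contains_object_print("{'level': 3, 'debug': False}")
+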
3836
+
3837
+ def assert_hashable(text):
3838
+ if not contains_object_print(text):
3839
+ return True
3840
+ raise AssertionError(
3841
+ f"vLLM tried to hash some configs that may have Python objects ids "
3842
+ f"in them. This is a bug, please file an issue. "
3843
+ f"Text being hashed: {text}")
3844
+
3845
+
3846
+ T = TypeVar("T")
3847
+
3848
+
3849
+ def get_layers_from_vllm_config(
3850
+ vllm_config: VllmConfig,
3851
+ layer_type: type[T],
3852
+ layer_names: Optional[list[str]] = None) -> dict[str, T]:
3853
+ """
3854
+ Get layers from the vLLM config.
3855
+
3856
+ Args:
3857
+ vllm_config: The vLLM config.
3858
+ layer_type: The type of the layer to get.
3859
+ layer_names: The names of the layers to get. If None, return all layers.
3860
+ """
3861
+
3862
+ if layer_names is None:
3863
+ layer_names = list(
3864
+ vllm_config.compilation_config.static_forward_context.keys())
3865
+
3866
+ forward_context = vllm_config.compilation_config.static_forward_context
3867
+
3868
+ return {
3869
+ layer_name: forward_context[layer_name]
3870
+ for layer_name in layer_names
3871
+ if isinstance(forward_context[layer_name], layer_type)
3872
+ }
3873
+
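+ # A hedged usage sketch of the helper above: the layer names shown are
+ # hypothetical, and the result may simply be empty if no layers were
+ # registered in the static forward context.
+ def _example_get_layers() -> None:
+     cfg = get_current_vllm_config()
+     layers = get_layers_from_vllm_config(cfg, torch.nn.Module)
+     # e.g. {"model.layers.0.self_attn.attn": <layer>, ...} or {}
+     for name, layer in layers.items():
+         logger.debug("registered layer %s: %s", name, type(layer).__name__)
+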
3874
+
3875
+ @config
3876
+ @dataclass
3877
+ class SpeechToTextConfig:
3878
+ """Configuration for speech-to-text models."""
3879
+
3880
+ sample_rate: float = 16_000
3881
+ """Sample rate (Hz) to resample input audio to. Most speech models expect
3882
+ 16kHz audio input. The input audio will be automatically resampled to this
3883
+ rate before processing."""
3884
+
3885
+ max_audio_clip_s: int = 30
3886
+ """Maximum duration in seconds for a single audio clip without chunking.
3887
+ Audio longer than this will be split into smaller chunks if
3888
+ `allow_audio_chunking` evaluates to True, otherwise it will be rejected."""
3889
+
3890
+ overlap_chunk_second: int = 1
3891
+ """Overlap duration in seconds between consecutive audio chunks when
3892
+ splitting long audio. This helps maintain context across chunk boundaries
3893
+ and improves transcription quality at split points."""
3894
+
3895
+ min_energy_split_window_size: Optional[int] = 1600
3896
+ """Window size in samples for finding low-energy (quiet) regions to split
3897
+ audio chunks. The algorithm looks for the quietest moment within this
3898
+ window to minimize cutting through speech. Default 1600 samples ≈ 100ms
3899
+ at 16kHz. If None, no chunking will be done."""
3900
+
3901
+ @property
3902
+ def allow_audio_chunking(self) -> bool:
3903
+ return self.min_energy_split_window_size is not None
3904
+
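+ # The arithmetic behind the "1600 samples ≈ 100ms at 16kHz" note above,
+ # using the default field values.
+ def _example_split_window_ms() -> float:
+     cfg = SpeechToTextConfig()
+     assert cfg.allow_audio_chunking  # window size is set by default
+     return 1000.0 * cfg.min_energy_split_window_size / cfg.sample_rate  # 100.0
+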
3905
+
3906
+ def update_config(config: DataclassInstanceT,
3907
+ overrides: dict[str, Any]) -> DataclassInstanceT:
3908
+ processed_overrides = {}
3909
+ for field_name, value in overrides.items():
3910
+ assert hasattr(
3911
+ config, field_name), f"{type(config)} has no field `{field_name}`"
3912
+ current_value = getattr(config, field_name)
3913
+ if is_dataclass(current_value) and not is_dataclass(value):
3914
+ assert isinstance(value, dict), (
3915
+ f"Overrides to {type(config)}.{field_name} must be a dict"
3916
+ f" or {type(current_value)}, but got {type(value)}")
3917
+ value = update_config(
3918
+ current_value, # type: ignore[type-var]
3919
+ value)
3920
+ processed_overrides[field_name] = value
3921
+ return replace(config, **processed_overrides)
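+
+ # A hedged usage sketch of update_config with a toy nested dataclass (not a
+ # real vLLM config): a dict override for a dataclass field is applied
+ # recursively, everything else is replaced directly.
+ def _example_update_config() -> None:
+     from dataclasses import dataclass as _dc, field as _field
+
+     @_dc
+     class _Inner:
+         x: int = 1
+
+     @_dc
+     class _Outer:
+         name: str = "a"
+         inner: _Inner = _field(default_factory=_Inner)
+
+     updated = update_config(_Outer(), {"name": "b", "inner": {"x": 5}})
+     assert updated.name == "b" and updated.inner.x == 5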