vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly released package versions from a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
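For context, a wheel (.whl) is an ordinary zip archive, so a file listing like the one below can be reproduced locally. The snippet is a minimal sketch using Python's standard zipfile module; the local filename is an assumption and should point at whichever wheel was actually downloaded. It is not part of the package itself.

```python
# Minimal sketch: a .whl file is a standard zip archive, so its file list can be
# reproduced locally with the standard-library zipfile module.
from zipfile import ZipFile

# Assumed local path; replace with the wheel you actually downloaded.
WHEEL = "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

with ZipFile(WHEEL) as wheel:
    for name in sorted(wheel.namelist()):
        info = wheel.getinfo(name)
        print(f"{name}  ({info.file_size} bytes)")
```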
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
vllm/config/model.py ADDED
@@ -0,0 +1,1912 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import hashlib
5
+ import json
6
+ import warnings
7
+ from dataclasses import InitVar, field
8
+ from importlib.util import find_spec
9
+ from typing import (TYPE_CHECKING, Any, Callable, Literal, Optional, Union,
10
+ cast, get_args)
11
+
12
+ import torch
13
+ from pydantic import (ConfigDict, SkipValidation, field_validator,
14
+ model_validator)
15
+ from pydantic.dataclasses import dataclass
16
+ from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
17
+
18
+ import vllm.envs as envs
19
+ from vllm.config.multimodal import (MMCacheType, MMEncoderTPMode,
20
+ MultiModalConfig)
21
+ from vllm.config.pooler import PoolerConfig
22
+ from vllm.config.utils import assert_hashable, config
23
+ from vllm.logger import init_logger
24
+ from vllm.platforms import current_platform
25
+ from vllm.transformers_utils.config import (
26
+ ConfigFormat, get_config, get_hf_image_processor_config,
27
+ get_hf_text_config, get_pooling_config,
28
+ get_sentence_transformer_tokenizer_config, is_encoder_decoder,
29
+ is_interleaved, try_get_generation_config, try_get_safetensors_metadata,
30
+ try_get_tokenizer_config, uses_mrope)
31
+ from vllm.transformers_utils.runai_utils import (ObjectStorageModel,
32
+ is_runai_obj_uri)
33
+ from vllm.transformers_utils.utils import maybe_model_redirect
34
+ from vllm.utils import LayerBlockType, LazyLoader, common_broadcastable_dtype
35
+
36
+ if TYPE_CHECKING:
37
+ from transformers import PretrainedConfig
38
+
39
+ import vllm.model_executor.layers.quantization as me_quant
40
+ import vllm.model_executor.models as me_models
41
+ from vllm.config.load import LoadConfig
42
+ from vllm.config.parallel import ParallelConfig
43
+ from vllm.config.scheduler import RunnerType
44
+ from vllm.model_executor.layers.quantization import QuantizationMethods
45
+ from vllm.v1.sample.logits_processor import LogitsProcessor
46
+ else:
47
+ PretrainedConfig = Any
48
+
49
+ me_quant = LazyLoader("model_executor", globals(),
50
+ "vllm.model_executor.layers.quantization")
51
+ me_models = LazyLoader("model_executor", globals(),
52
+ "vllm.model_executor.models")
53
+ LoadConfig = Any
54
+ ParallelConfig = Any
55
+ RunnerType = Any
56
+ QuantizationMethods = Any
57
+ LogitsProcessor = Any
58
+
59
+ logger = init_logger(__name__)
60
+
61
+ RunnerOption = Literal["auto", "generate", "pooling", "draft"]
62
+ ConvertType = Literal["none", "embed", "classify", "reward"]
63
+ ConvertOption = Literal["auto", ConvertType]
64
+ TaskOption = Literal["auto", "generate", "embedding", "embed", "classify",
65
+ "score", "reward", "transcription", "draft"]
66
+ TokenizerMode = Literal["auto", "slow", "mistral", "custom"]
67
+ ModelDType = Literal["auto", "half", "float16", "bfloat16", "float", "float32"]
68
+ LogprobsMode = Literal["raw_logits", "raw_logprobs", "processed_logits",
69
+ "processed_logprobs"]
70
+ HfOverrides = Union[dict[str, Any], Callable[[PretrainedConfig],
71
+ PretrainedConfig]]
72
+ ModelImpl = Literal["auto", "vllm", "transformers", "terratorch"]
73
+
74
+ _RUNNER_TASKS: dict[RunnerType, list[TaskOption]] = {
75
+ "generate": ["generate", "transcription"],
76
+ "pooling": ["embedding", "embed", "classify", "score", "reward"],
77
+ "draft": ["draft"],
78
+ }
79
+
80
+ _RUNNER_CONVERTS: dict[RunnerType, list[ConvertType]] = {
81
+ "generate": [],
82
+ "pooling": ["embed", "classify", "reward"],
83
+ "draft": [],
84
+ }
85
+
86
+
87
+ @config
88
+ @dataclass(config=ConfigDict(arbitrary_types_allowed=True))
89
+ class ModelConfig:
90
+ """Configuration for the model."""
91
+
92
+ model: str = "Qwen/Qwen3-0.6B"
93
+ """Name or path of the Hugging Face model to use. It is also used as the
94
+ content for `model_name` tag in metrics output when `served_model_name` is
95
+ not specified."""
96
+ runner: RunnerOption = "auto"
97
+ """The type of model runner to use. Each vLLM instance only supports one
98
+ model runner, even if the same model can be used for multiple types."""
99
+ convert: ConvertOption = "auto"
100
+ """Convert the model using adapters defined in
101
+ [vllm.model_executor.models.adapters][]. The most common use case is to
102
+ adapt a text generation model to be used for pooling tasks."""
103
+ task: Optional[TaskOption] = None
104
+ """[DEPRECATED] The task to use the model for. If the model supports more
105
+ than one model runner, this is used to select which model runner to run.
106
+
107
+ Note that the model may support other tasks using the same model runner.
108
+ """
109
+ tokenizer: SkipValidation[str] = None # type: ignore
110
+ """Name or path of the Hugging Face tokenizer to use. If unspecified, model
111
+ name or path will be used."""
112
+ tokenizer_mode: TokenizerMode = "auto"
113
+ """Tokenizer mode:\n
114
+ - "auto" will use the fast tokenizer if available.\n
115
+ - "slow" will always use the slow tokenizer.\n
116
+ - "mistral" will always use the tokenizer from `mistral_common`.\n
117
+ - "custom" will use --tokenizer to select the preregistered tokenizer."""
118
+ trust_remote_code: bool = False
119
+ """Trust remote code (e.g., from HuggingFace) when downloading the model
120
+ and tokenizer."""
121
+ dtype: Union[ModelDType, torch.dtype] = "auto"
122
+ """Data type for model weights and activations:\n
123
+ - "auto" will use FP16 precision for FP32 and FP16 models, and BF16
124
+ precision for BF16 models.\n
125
+ - "half" for FP16. Recommended for AWQ quantization.\n
126
+ - "float16" is the same as "half".\n
127
+ - "bfloat16" for a balance between precision and range.\n
128
+ - "float" is shorthand for FP32 precision.\n
129
+ - "float32" for FP32 precision."""
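# A simplified sketch of the "auto" rule above (the actual resolution happens in
# _get_and_verify_dtype below and also considers pooling models and the platform):
#     import torch
#     def resolve_auto_dtype(checkpoint_dtype: torch.dtype) -> torch.dtype:
#         # FP32 and FP16 checkpoints -> FP16; BF16 checkpoints -> BF16
#         return (torch.bfloat16 if checkpoint_dtype == torch.bfloat16
#                 else torch.float16)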
130
+ seed: Optional[int] = None
131
+ """Random seed for reproducibility. Initialized to None in V0, but
132
+ initialized to 0 in V1."""
133
+ hf_config_path: Optional[str] = None
134
+ """Name or path of the Hugging Face config to use. If unspecified, model
135
+ name or path will be used."""
136
+ allowed_local_media_path: str = ""
137
+ """Allow API requests to read local images or videos from directories
138
+ specified by the server file system. This is a security risk. Should only
139
+ be enabled in trusted environments."""
140
+ allowed_media_domains: Optional[list[str]] = None
141
+ """If set, only media URLs that belong to this domain can be used for
142
+ multi-modal inputs. """
143
+ revision: Optional[str] = None
144
+ """The specific model version to use. It can be a branch name, a tag name,
145
+ or a commit id. If unspecified, will use the default version."""
146
+ code_revision: Optional[str] = None
147
+ """The specific revision to use for the model code on the Hugging Face Hub.
148
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
149
+ use the default version."""
150
+ rope_scaling: dict[str, Any] = field(default_factory=dict)
151
+ """RoPE scaling configuration. For example,
152
+ `{"rope_type":"dynamic","factor":2.0}`."""
153
+ rope_theta: Optional[float] = None
154
+ """RoPE theta. Use with `rope_scaling`. In some cases, changing the RoPE
155
+ theta improves the performance of the scaled model."""
156
+ tokenizer_revision: Optional[str] = None
157
+ """The specific revision to use for the tokenizer on the Hugging Face Hub.
158
+ It can be a branch name, a tag name, or a commit id. If unspecified, will
159
+ use the default version."""
160
+ max_model_len: SkipValidation[int] = None # type: ignore
161
+ """Model context length (prompt and output). If unspecified, will be
162
+ automatically derived from the model config.
163
+
164
+ When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
165
+ format. Examples:\n
166
+ - 1k -> 1000\n
167
+ - 1K -> 1024\n
168
+ - 25.6k -> 25,600"""
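# A minimal sketch of the human-readable suffixes listed above (the real parsing
# lives in the CLI argument layer; this helper name is illustrative only):
#     def parse_human_readable_int(value: str) -> int:
#         scales = {"k": 1_000, "m": 1_000_000, "g": 1_000_000_000,
#                   "K": 1_024, "M": 1_024 ** 2, "G": 1_024 ** 3}
#         if value and value[-1] in scales:
#             return int(float(value[:-1]) * scales[value[-1]])
#         return int(value)
#     # parse_human_readable_int("1k") == 1000; parse_human_readable_int("25.6k") == 25600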
169
+ spec_target_max_model_len: Optional[int] = None
170
+ """Specify the maximum length for spec decoding draft models."""
171
+ quantization: SkipValidation[Optional[QuantizationMethods]] = None
172
+ """Method used to quantize the weights. If `None`, we first check the
173
+ `quantization_config` attribute in the model config file. If that is
174
+ `None`, we assume the model weights are not quantized and use `dtype` to
175
+ determine the data type of the weights."""
176
+ enforce_eager: bool = False
177
+ """Whether to always use eager-mode PyTorch. If True, we will disable CUDA
178
+ graph and always execute the model in eager mode. If False, we will use
179
+ CUDA graph and eager execution in hybrid for maximal performance and
180
+ flexibility."""
181
+ max_logprobs: int = 20
182
+ """Maximum number of log probabilities to return when `logprobs` is
183
+ specified in `SamplingParams`. The default value matches the default of the
184
+ OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length *
185
+ vocab_size) logprobs are allowed to be returned and it may cause OOM."""
186
+ logprobs_mode: LogprobsMode = "raw_logprobs"
187
+ """Indicates the content returned in the logprobs and prompt_logprobs.
188
+ Supported mode:
189
+ 1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits.
190
+ Raw means the values before applying any logit processors, like bad words.
191
+ Processed means the values after applying all processors, including
192
+ temperature and top_k/top_p.
193
+ """
194
+ disable_sliding_window: bool = False
195
+ """Whether to disable sliding window. If True, we will disable the sliding
196
+ window functionality of the model, capping the max length to the sliding
197
+ window size. If the model does not support sliding window, this argument is ignored."""
198
+ disable_cascade_attn: bool = False
199
+ """Disable cascade attention for V1. While cascade attention does not
200
+ change the mathematical correctness, disabling it could be useful for
201
+ preventing potential numerical issues. Note that even if this is set to
202
+ False, cascade attention will only be used when the heuristic tells that
203
+ it's beneficial."""
204
+ skip_tokenizer_init: bool = False
205
+ """Skip initialization of tokenizer and detokenizer. Expects valid
206
+ `prompt_token_ids` and `None` for prompt from the input. The generated
207
+ output will contain token ids."""
208
+ enable_prompt_embeds: bool = False
209
+ """If `True`, enables passing text embeddings as inputs via the
210
+ `prompt_embeds` key. Note that enabling this will double the time required
211
+ for graph compilation."""
212
+ served_model_name: Optional[Union[str, list[str]]] = None
213
+ """The model name(s) used in the API. If multiple names are provided, the
214
+ server will respond to any of the provided names. The model name in the
215
+ model field of a response will be the first name in this list. If not
216
+ specified, the model name will be the same as the `--model` argument. Note
217
+ that this name (or these names) will also be used in the `model_name` tag of
218
+ Prometheus metrics; if multiple names are provided, the metrics tag will use
219
+ the first one."""
220
+ config_format: Union[str, ConfigFormat] = "auto"
221
+ """The format of the model config to load:\n
222
+ - "auto" will try to load the config in hf format if available else it
223
+ will try to load in mistral format.\n
224
+ - "hf" will load the config in hf format.\n
225
+ - "mistral" will load the config in mistral format."""
226
+ hf_token: Optional[Union[bool, str]] = None
227
+ """The token to use as HTTP bearer authorization for remote files. If
228
+ `True`, will use the token generated when running `huggingface-cli login`
229
+ (stored in `~/.huggingface`)."""
230
+ hf_overrides: HfOverrides = field(default_factory=dict)
231
+ """If a dictionary, contains arguments to be forwarded to the Hugging Face
232
+ config. If a callable, it is called to update the HuggingFace config."""
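# Both forms described above, with illustrative override values; constructing
# ModelConfig runs __post_init__, which fetches the HF config, so this assumes
# the model is reachable locally or on the Hub:
#     cfg = ModelConfig(model="Qwen/Qwen3-0.6B",
#                       hf_overrides={"rope_theta": 1e6})      # dict form
#     def bump_rope(hf_cfg):                                   # callable form,
#         hf_cfg.rope_theta = 1e6                              # assumed to mutate
#         return hf_cfg                                        # and return the config
#     cfg = ModelConfig(model="Qwen/Qwen3-0.6B", hf_overrides=bump_rope)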
233
+ logits_processor_pattern: Optional[str] = None
234
+ """Optional regex pattern specifying valid logits processor qualified names
235
+ that can be passed with the `logits_processors` extra completion argument.
236
+ Defaults to `None`, which allows no processors."""
237
+ generation_config: str = "auto"
238
+ """The folder path to the generation config. Defaults to `"auto"`, the
239
+ generation config will be loaded from model path. If set to `"vllm"`, no
240
+ generation config is loaded, vLLM defaults will be used. If set to a folder
241
+ path, the generation config will be loaded from the specified folder path.
242
+ If `max_new_tokens` is specified in generation config, then it sets a
243
+ server-wide limit on the number of output tokens for all requests."""
244
+ override_generation_config: dict[str, Any] = field(default_factory=dict)
245
+ """Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
246
+ used with `--generation-config auto`, the override parameters will be
247
+ merged with the default config from the model. If used with
248
+ `--generation-config vllm`, only the override parameters are used."""
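# For example, with illustrative values, the two merge behaviours above are:
#     ModelConfig(generation_config="auto",
#                 override_generation_config={"temperature": 0.5})  # merged with model defaults
#     ModelConfig(generation_config="vllm",
#                 override_generation_config={"temperature": 0.5})  # only the override is used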
249
+ enable_sleep_mode: bool = False
250
+ """Enable sleep mode for the engine (only cuda platform is supported)."""
251
+ model_impl: Union[str, ModelImpl] = "auto"
252
+ """Which implementation of the model to use:\n
253
+ - "auto" will try to use the vLLM implementation, if it exists, and fall
254
+ back to the Transformers implementation if no vLLM implementation is
255
+ available.\n
256
+ - "vllm" will use the vLLM model implementation.\n
257
+ - "transformers" will use the Transformers model implementation.\n
258
+ - "terratorch" will use the TerraTorch model implementation.
259
+ """
260
+ override_attention_dtype: Optional[str] = None
261
+ """Override dtype for attention"""
262
+ logits_processors: Optional[list[Union[str, type[LogitsProcessor]]]] = None
263
+ """One or more logits processors' fully-qualified class names or class
264
+ definitions"""
265
+ io_processor_plugin: Optional[str] = None
266
+ """IOProcessor plugin name to load at model startup"""
267
+
268
+ # Pooler config
269
+ pooler_config: Optional[PoolerConfig] = None
270
+ """Pooler config which controls the behaviour of output pooling in pooling
271
+ models."""
272
+ override_pooler_config: Optional[Union[dict, PoolerConfig]] = None
273
+ """[DEPRECATED] Use `pooler_config` instead. This field will be removed in
274
+ v0.12.0 or v1.0.0, whichever is sooner."""
275
+
276
+ # Multimodal config and init vars
277
+ multimodal_config: Optional[MultiModalConfig] = None
278
+ """Configuration for multimodal model. If `None`, this will be inferred
279
+ from the architecture of `self.model`."""
280
+ limit_mm_per_prompt: InitVar[Optional[dict[str, int]]] = None
281
+ media_io_kwargs: InitVar[Optional[dict[str, dict[str, Any]]]] = None
282
+ mm_processor_kwargs: InitVar[Optional[dict[str, Any]]] = None
283
+ mm_processor_cache_gb: InitVar[Optional[float]] = None
284
+ mm_processor_cache_type: InitVar[Optional[MMCacheType]] = None
285
+ mm_shm_cache_max_object_size_mb: InitVar[Optional[int]] = None
286
+ mm_encoder_tp_mode: InitVar[Optional[MMEncoderTPMode]] = None
287
+ interleave_mm_strings: InitVar[Optional[bool]] = None
288
+ skip_mm_profiling: InitVar[Optional[bool]] = None
289
+ video_pruning_rate: InitVar[Optional[float]] = None
290
+
291
+ def compute_hash(self) -> str:
292
+ """
293
+ WARNING: Whenever a new field is added to this config,
294
+ ensure that it is included in the factors list if
295
+ it affects the computation graph.
296
+
297
+ Provide a hash that uniquely identifies all the configs
298
+ that affect the structure of the computation
299
+ graph from input ids/embeddings to the final hidden states,
300
+ excluding anything before input ids/embeddings and after
301
+ the final hidden states.
302
+ """
303
+ factors: list[Any] = []
304
+ factors.append(self.model)
305
+ factors.append(self.dtype)
306
+ factors.append(self.quantization)
307
+ factors.append(self.revision)
308
+ factors.append(self.code_revision)
309
+ factors.append(self.max_model_len)
310
+ factors.append(self.max_logprobs)
311
+ factors.append(self.disable_sliding_window)
312
+ factors.append(self.trust_remote_code)
313
+ factors.append(self.generation_config)
314
+ factors.append(self.model_impl)
315
+ factors.append(self.override_generation_config)
316
+ factors.append(self.rope_scaling)
317
+ factors.append(self.rope_theta)
318
+ factors.append(self.video_pruning_rate)
319
+
320
+ # hf_config can control how the model looks!
321
+ try:
322
+ hf_config_json = self.hf_config.to_json_string(use_diff=False)
323
+ except TypeError:
324
+ from transformers import PretrainedConfig
325
+
326
+ from vllm.utils.jsontree import json_map_leaves
327
+
328
+ # Handle nested HF configs with unserializable values gracefully
329
+ hf_config_json = json.dumps(
330
+ json_map_leaves(
331
+ lambda v: v.to_dict()
332
+ if isinstance(v, PretrainedConfig) else str(v),
333
+ self.hf_config.to_dict(),
334
+ ),
335
+ indent=2,
336
+ sort_keys=True,
337
+ ) + "\n"
338
+
339
+ factors.append(hf_config_json)
340
+
341
+ str_factors = str(factors)
342
+ assert_hashable(str_factors)
343
+ return hashlib.sha256(str(factors).encode()).hexdigest()
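# Consequence of the factor list above: two ModelConfig instances that differ
# only in fields outside `factors` (e.g. `served_model_name` or `tokenizer`)
# produce the same compute_hash(), since only structure-affecting fields and
# the serialized hf_config feed the SHA-256 digest.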
344
+
345
+ def __post_init__(
346
+ self,
347
+ # Multimodal config init vars
348
+ limit_mm_per_prompt: Optional[dict[str, int]],
349
+ media_io_kwargs: Optional[dict[str, dict[str, Any]]],
350
+ mm_processor_kwargs: Optional[dict[str, Any]],
351
+ mm_processor_cache_gb: Optional[float],
352
+ mm_processor_cache_type: Optional[MMCacheType],
353
+ mm_shm_cache_max_object_size_mb: Optional[int],
354
+ mm_encoder_tp_mode: Optional[MMEncoderTPMode],
355
+ interleave_mm_strings: Optional[bool],
356
+ skip_mm_profiling: Optional[bool],
357
+ video_pruning_rate: Optional[float],
358
+ ) -> None:
359
+ # Set the default seed to 0 in V1.
360
+ # NOTE(woosuk): In V0, we set the default seed to None because the
361
+ # driver worker shares the same process as the user process, and thus
362
+ # setting a seed affects the user process as well.
363
+ # In V1, we use separate processes for workers (unless
364
+ # VLLM_ENABLE_V1_MULTIPROCESSING=0), so setting a seed here
365
+ # doesn't affect the user process. However, without a consistent seed,
366
+ # different tensor parallel workers would sample different tokens,
367
+ # leading to inconsistent results.
368
+ if envs.VLLM_USE_V1 and self.seed is None:
369
+ self.seed = 0
370
+ if not envs.VLLM_ENABLE_V1_MULTIPROCESSING:
371
+ logger.warning(
372
+ "The global random seed is set to %d. Since "
373
+ "VLLM_ENABLE_V1_MULTIPROCESSING is set to False, this may "
374
+ "affect the random state of the Python process that "
375
+ "launched vLLM.", self.seed)
376
+
377
+ # Set served_model_name before maybe_model_redirect(self.model) so the original name is kept
378
+ self.served_model_name = get_served_model_name(self.model,
379
+ self.served_model_name)
380
+ self.model = maybe_model_redirect(self.model)
381
+ # The tokenizer is consistent with the model by default.
382
+ if self.tokenizer is None:
383
+ self.tokenizer = self.model
384
+ if self.tokenizer_revision is None:
385
+ self.tokenizer_revision = self.revision
386
+ self.tokenizer = maybe_model_redirect(self.tokenizer)
387
+
388
+ if isinstance(self.hf_config_path, str):
389
+ self.hf_config_path = maybe_model_redirect(self.hf_config_path)
390
+
391
+ if callable(self.hf_overrides):
392
+ hf_overrides_kw = {}
393
+ hf_overrides_fn = self.hf_overrides
394
+ else:
395
+ hf_overrides_kw = self.hf_overrides
396
+ hf_overrides_fn = None
397
+
398
+ if self.rope_scaling:
399
+ hf_override: dict[str, Any] = {"rope_scaling": self.rope_scaling}
400
+ hf_overrides_kw.update(hf_override)
401
+ hf_overrides_str = json.dumps(hf_overrides_kw)
402
+ msg = (
403
+ "`--rope-scaling` will be removed in a future release. "
404
+ f"Please instead use `--hf-overrides '{hf_overrides_str}'`")
405
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
406
+ if self.rope_theta is not None:
407
+ hf_override = {"rope_theta": self.rope_theta}
408
+ hf_overrides_kw.update(hf_override)
409
+ hf_overrides_str = json.dumps(hf_overrides_kw)
410
+ msg = (
411
+ "`--rope-theta` will be removed in a future release. "
412
+ f"Please instead use `--hf-overrides '{hf_overrides_str}'`")
413
+ warnings.warn(DeprecationWarning(msg), stacklevel=2)
414
+
415
+ self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)
416
+
417
+ if (backend := envs.VLLM_ATTENTION_BACKEND
418
+ ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
419
+ raise ValueError(
420
+ "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
421
+ "module was not found. See "
422
+ "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501
423
+ "for instructions on how to install it.")
424
+
425
+ from vllm.platforms import current_platform
426
+
427
+ if (self.override_attention_dtype is not None
428
+ and not current_platform.is_rocm()):
429
+ warnings.warn(
430
+ "override-attention-dtype is set but not using ROCm platform",
431
+ stacklevel=2)
432
+
433
+ if (self.enable_sleep_mode
434
+ and not current_platform.is_sleep_mode_available()):
435
+ raise ValueError(
436
+ "Sleep mode is not supported on current platform.")
437
+
438
+ hf_config = get_config(self.hf_config_path or self.model,
439
+ self.trust_remote_code,
440
+ self.revision,
441
+ self.code_revision,
442
+ self.config_format,
443
+ hf_overrides_kw=hf_overrides_kw,
444
+ hf_overrides_fn=hf_overrides_fn)
445
+
446
+ self.hf_config = hf_config
447
+ self.hf_text_config = get_hf_text_config(self.hf_config)
448
+ self.attention_chunk_size = getattr(self.hf_text_config,
449
+ "attention_chunk_size", None)
450
+ self.encoder_config = self._get_encoder_config()
451
+ self.hf_image_processor_config = get_hf_image_processor_config(
452
+ self.model, hf_token=self.hf_token, revision=self.revision)
453
+
454
+ architectures = self.architectures
455
+ registry = self.registry
456
+ is_generative_model = registry.is_text_generation_model(
457
+ architectures, self)
458
+ is_pooling_model = registry.is_pooling_model(architectures, self)
459
+
460
+ def _task_to_convert(task: TaskOption) -> ConvertType:
461
+ if task == "embedding" or task == "embed":
462
+ return "embed"
463
+ if task == "classify":
464
+ return "classify"
465
+ if task == "reward":
466
+ return "reward"
467
+ if task == "score":
468
+ new_task = self._get_default_pooling_task(architectures)
469
+ return "classify" if new_task == "classify" else "embed"
470
+
471
+ return "none"
472
+
473
+ if self.task is not None:
474
+ runner: RunnerOption = "auto"
475
+ convert: ConvertOption = "auto"
476
+ msg_prefix = ("The 'task' option has been deprecated and will be "
477
+ "removed in v0.13.0 or v1.0, whichever comes first.")
478
+ msg_hint = "Please remove this option."
479
+
480
+ is_generative_task = self.task in _RUNNER_TASKS["generate"]
481
+ is_pooling_task = self.task in _RUNNER_TASKS["pooling"]
482
+
483
+ if is_generative_model and is_pooling_model:
484
+ if is_generative_task:
485
+ runner = "generate"
486
+ convert = "auto"
487
+ msg_hint = ("Please replace this option with `--runner "
488
+ "generate` to continue using this model "
489
+ "as a generative model.")
490
+ elif is_pooling_task:
491
+ runner = "pooling"
492
+ convert = "auto"
493
+ msg_hint = ("Please replace this option with `--runner "
494
+ "pooling` to continue using this model "
495
+ "as a pooling model.")
496
+ else: # task == "auto"
497
+ pass
498
+ elif is_generative_model or is_pooling_model:
499
+ if is_generative_task:
500
+ runner = "generate"
501
+ convert = "auto"
502
+ msg_hint = "Please remove this option"
503
+ elif is_pooling_task:
504
+ runner = "pooling"
505
+ convert = _task_to_convert(self.task)
506
+ msg_hint = ("Please replace this option with `--convert "
507
+ f"{convert}` to continue using this model "
508
+ "as a pooling model.")
509
+ else: # task == "auto"
510
+ pass
511
+ else:
512
+ raise AssertionError("The model should be a generative or "
513
+ "pooling model when task is set to "
514
+ f"{self.task!r}.")
515
+
516
+ self.runner = runner
517
+ self.convert = convert
518
+
519
+ msg = f"{msg_prefix} {msg_hint}"
520
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
521
+
522
+ self.runner_type = self._get_runner_type(architectures, self.runner)
523
+ self.convert_type = self._get_convert_type(architectures,
524
+ self.runner_type,
525
+ self.convert)
526
+
527
+ if self.runner_type == "generate" and not is_generative_model:
528
+ generate_converts = _RUNNER_CONVERTS["generate"]
529
+ if self.convert_type not in generate_converts:
530
+ # Currently we don't have any converters for generative models
531
+ raise ValueError(
532
+ "This model does not support `--runner generate`.")
533
+ if self.runner_type == "pooling" and not is_pooling_model:
534
+ pooling_converts = _RUNNER_CONVERTS["pooling"]
535
+ if self.convert_type not in pooling_converts:
536
+ convert_option = "<" + "|".join(pooling_converts) + ">"
537
+ raise ValueError(
538
+ "This model does not support `--runner pooling`. "
539
+ f"You can pass `--convert {convert_option}` to adapt "
540
+ "it into a pooling model.")
541
+
542
+ # Note: Initialize these attributes early because transformers fallback
543
+ # may fail to load dynamic modules in child processes
544
+ model_info, arch = registry.inspect_model_cls(architectures, self)
545
+ self._model_info = model_info
546
+ self._architecture = arch
547
+ logger.info("Resolved architecture: %s", arch)
548
+
549
+ # Init pooler config if needed
550
+ if self.runner_type == "pooling":
551
+ if self.override_pooler_config is not None:
552
+ logger.warning_once(
553
+ "`override_pooler_config` is deprecated and will be "
554
+ "removed in v0.12.0 or v1.0.0, whichever is sooner. "
555
+ "Please use `pooler_config` instead.")
556
+
557
+ if isinstance(self.override_pooler_config, dict):
558
+ self.pooler_config = PoolerConfig(
559
+ **self.override_pooler_config)
560
+ else:
561
+ self.pooler_config = self.override_pooler_config
562
+
563
+ if self.pooler_config is None:
564
+ self.pooler_config = PoolerConfig()
565
+
566
+ base_config = get_pooling_config(self.model, self.revision)
567
+ if base_config is not None:
568
+ # Only set values that are not overridden by the user
569
+ for k, v in base_config.items():
570
+ if getattr(self.pooler_config, k) is None:
571
+ setattr(self.pooler_config, k, v)
572
+
573
+ default_pooling_type = self._model_info.default_pooling_type
574
+ if self.pooler_config.pooling_type is None:
575
+ self.pooler_config.pooling_type = default_pooling_type
576
+
577
+ self.dtype: torch.dtype = _get_and_verify_dtype(
578
+ self.model,
579
+ self.hf_config,
580
+ self.dtype,
581
+ is_pooling_model=self.runner_type == "pooling",
582
+ revision=self.revision,
583
+ )
584
+
585
+ # Interleaved attention is not supported by some backends in V0
586
+ if (not self.disable_sliding_window
587
+ and is_interleaved(self.hf_text_config)
588
+ and not envs.VLLM_USE_V1
589
+ and (backend := envs.VLLM_ATTENTION_BACKEND)
590
+ in ("XFORMERS", "FLASHINFER")):
591
+ logger.warning_once(
592
+ "%s has interleaved attention, which is currently not "
593
+ "supported by the %s backend. Disabling sliding window and "
594
+ "capping the max length to the sliding window size (%d).",
595
+ self.hf_text_config.model_type,
596
+ backend,
597
+ self.hf_text_config.sliding_window,
598
+ )
599
+ self.disable_sliding_window = True
600
+
601
+ self.original_max_model_len = self.max_model_len
602
+ self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
603
+ # Init multimodal config if needed
604
+ if self._model_info.supports_multimodal:
605
+ if (mm_encoder_tp_mode == "data" and
606
+ not self._model_info.supports_multimodal_encoder_tp_data):
607
+ logger.warning_once(
608
+ "This model does not support `--mm-encoder-tp-mode data`. "
609
+ "Falling back to `--mm-encoder-tp-mode weights`.")
610
+ mm_encoder_tp_mode = "weights"
611
+
612
+ mm_config_kwargs = dict(
613
+ limit_per_prompt=limit_mm_per_prompt,
614
+ media_io_kwargs=media_io_kwargs,
615
+ mm_processor_kwargs=mm_processor_kwargs,
616
+ mm_processor_cache_gb=mm_processor_cache_gb,
617
+ mm_processor_cache_type=mm_processor_cache_type,
618
+ mm_shm_cache_max_object_size_mb=mm_shm_cache_max_object_size_mb,
619
+ mm_encoder_tp_mode=mm_encoder_tp_mode,
620
+ interleave_mm_strings=interleave_mm_strings,
621
+ skip_mm_profiling=skip_mm_profiling,
622
+ video_pruning_rate=video_pruning_rate,
623
+ )
624
+
625
+ mm_config_kwargs = {
626
+ k: v
627
+ for k, v in mm_config_kwargs.items() if v is not None
628
+ }
629
+
630
+ self.multimodal_config = MultiModalConfig(**mm_config_kwargs)
631
+
632
+ if self.disable_sliding_window:
633
+ # Set after get_and_verify_max_len to ensure that max_model_len
634
+ # can be correctly capped to sliding window size
635
+ self.hf_text_config.sliding_window = None
636
+
637
+ if not self.skip_tokenizer_init:
638
+ self._verify_tokenizer_mode()
639
+
640
+ # Avoid running try_verify_and_update_config multiple times
641
+ self.config_updated = False
642
+
643
+ self._verify_quantization()
644
+ self._verify_cuda_graph()
645
+ self._verify_bnb_config()
646
+
647
+ @field_validator("quantization", mode="before")
648
+ @classmethod
649
+ def validate_quantization_before(cls, value: Any) -> Any:
650
+ if isinstance(value, str):
651
+ return value.lower()
652
+ return value
653
+
654
+ @model_validator(mode="after")
655
+ def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
656
+ if not isinstance(self.tokenizer, str):
657
+ raise ValueError("tokenizer must be a string after __post_init__.")
658
+ if not isinstance(self.max_model_len, int):
659
+ raise ValueError(
660
+ "max_model_len must be an integer after __post_init__.")
661
+ return self
662
+
663
+ def _get_transformers_backend_cls(self) -> str:
664
+ """Determine which Transformers backend class will be used if
665
+ `model_impl` is set to `transformers` or `auto`."""
666
+ if getattr(self, "runner_type", self.runner) == "pooling":
667
+ return "TransformersModel"
668
+ if self.hf_config != self.hf_text_config:
669
+ # If 'hf_text_config' differs from 'hf_config', this is probably a
670
+ # composite config, i.e. multimodal
671
+ return "TransformersForMultimodalLM"
672
+ return "TransformersForCausalLM"
673
+
674
+ def using_transformers_backend(self) -> bool:
675
+ """Check if the model is using the Transformers backend class."""
676
+ return self.architecture == self._get_transformers_backend_cls()
677
+
678
+ @property
679
+ def registry(self):
680
+ return me_models.ModelRegistry
681
+
682
+ @property
683
+ def architectures(self) -> list[str]:
684
+ return getattr(self.hf_config, "architectures", [])
685
+
686
+ @property
687
+ def architecture(self) -> str:
688
+ """The architecture vllm actually used."""
689
+ return self._architecture
690
+
691
+ def maybe_pull_model_tokenizer_for_runai(self, model: str,
692
+ tokenizer: str) -> None:
693
+ """Pull model/tokenizer from Object Storage to temporary
694
+ directory when needed.
695
+
696
+ Args:
697
+ model: Model name or path
698
+ tokenizer: Tokenizer name or path
699
+ """
700
+
701
+ if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
702
+ return
703
+
704
+ if is_runai_obj_uri(model):
705
+ object_storage_model = ObjectStorageModel(url=model)
706
+ object_storage_model.pull_files(
707
+ model, allow_pattern=["*.model", "*.py", "*.json"])
708
+ self.model_weights = model
709
+ self.model = object_storage_model.dir
710
+
711
+ # If tokenizer is same as model, download to same directory
712
+ if model == tokenizer:
713
+ object_storage_model.pull_files(model,
714
+ ignore_pattern=[
715
+ "*.pt", "*.safetensors",
716
+ "*.bin", "*.tensors",
717
+ "*.pth"
718
+ ])
719
+ self.tokenizer = object_storage_model.dir
720
+ return
721
+
722
+ # Only download tokenizer if needed and not already handled
723
+ if is_runai_obj_uri(tokenizer):
724
+ object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
725
+ object_storage_tokenizer.pull_files(model,
726
+ ignore_pattern=[
727
+ "*.pt", "*.safetensors",
728
+ "*.bin", "*.tensors",
729
+ "*.pth"
730
+ ])
731
+ self.tokenizer = object_storage_tokenizer.dir
732
+
733
+ def _get_encoder_config(self):
734
+ return get_sentence_transformer_tokenizer_config(
735
+ self.model, self.revision)
736
+
737
+ def _verify_tokenizer_mode(self) -> None:
738
+ tokenizer_mode = cast(TokenizerMode, self.tokenizer_mode.lower())
739
+ if tokenizer_mode not in get_args(TokenizerMode):
740
+ raise ValueError(
741
+ f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
742
+ f"one of {get_args(TokenizerMode)}.")
743
+ self.tokenizer_mode = tokenizer_mode
744
+
745
+ def _get_default_runner_type(
746
+ self,
747
+ architectures: list[str],
748
+ ) -> RunnerType:
749
+ registry = self.registry
750
+
751
+ # Some Sentence Transformers models use *ForCausalLM archs
752
+ if get_pooling_config(self.model, self.revision):
753
+ return "pooling"
754
+
755
+ for arch in architectures:
756
+ if arch in registry.get_supported_archs():
757
+ if registry.is_pooling_model(architectures, self):
758
+ return "pooling"
759
+ if registry.is_text_generation_model(architectures, self):
760
+ return "generate"
761
+
762
+ match = try_match_architecture_defaults(arch)
763
+ if match:
764
+ _, (runner_type, _) = match
765
+ return runner_type
766
+
767
+ return "generate"
768
+
769
+ def _get_runner_type(
770
+ self,
771
+ architectures: list[str],
772
+ runner: RunnerOption,
773
+ ) -> RunnerType:
774
+ if runner != "auto":
775
+ return runner
776
+
777
+ runner_type = self._get_default_runner_type(architectures)
778
+
779
+ # Don't log the most common case
780
+ if runner_type != "generate":
781
+ logger.info(
782
+ "Resolved `--runner auto` to `--runner %s`. "
783
+ "Pass the value explicitly to silence this message.",
784
+ runner_type)
785
+
786
+ return runner_type
787
+
788
+ def _get_default_convert_type(
789
+ self,
790
+ architectures: list[str],
791
+ runner_type: RunnerType,
792
+ ) -> ConvertType:
793
+ registry = self.registry
794
+
795
+ for arch in architectures:
796
+ if arch in registry.get_supported_archs():
797
+ if (runner_type == "generate"
798
+ and registry.is_text_generation_model(
799
+ architectures, self)):
800
+ return "none"
801
+ if (runner_type == "pooling"
802
+ and registry.is_pooling_model(architectures, self)):
803
+ return "none"
804
+
805
+ match = try_match_architecture_defaults(arch,
806
+ runner_type=runner_type)
807
+ if match:
808
+ _, (_, convert_type) = match
809
+ return convert_type
810
+
811
+ # This is to handle Sentence Transformers models that use *ForCausalLM
812
+ # and also multi-modal pooling models which are not defined as
813
+ # Sentence Transformers models
814
+ if runner_type == "pooling":
815
+ return "embed"
816
+
817
+ return "none"
818
+
819
+ def _get_convert_type(
820
+ self,
821
+ architectures: list[str],
822
+ runner_type: RunnerType,
823
+ convert: ConvertOption,
824
+ ) -> ConvertType:
825
+ if convert != "auto":
826
+ return convert
827
+
828
+ convert_type = self._get_default_convert_type(architectures,
829
+ runner_type)
830
+
831
+ # Don't log the most common case
832
+ if convert_type != "none":
833
+ logger.info(
834
+ "Resolved `--convert auto` to `--convert %s`. "
835
+ "Pass the value explicitly to silence this message.",
836
+ convert_type)
837
+
838
+ return convert_type
839
+
840
+ def _get_default_pooling_task(
841
+ self,
842
+ architectures: list[str],
843
+ ) -> Literal["embed", "classify", "reward"]:
844
+ if self.registry.is_cross_encoder_model(architectures, self):
845
+ return "classify"
846
+
847
+ for arch in architectures:
848
+ match = try_match_architecture_defaults(arch,
849
+ runner_type="pooling")
850
+ if match:
851
+ _, (_, convert_type) = match
852
+ assert convert_type != "none"
853
+ return convert_type
854
+
855
+ return "embed"
856
+
857
+ def _parse_quant_hf_config(self, hf_config: PretrainedConfig):
858
+ quant_cfg = getattr(hf_config, "quantization_config", None)
859
+ if quant_cfg is None:
860
+ # compressed-tensors uses a "compression_config" key
861
+ quant_cfg = getattr(hf_config, "compression_config", None)
862
+
863
+ else:
864
+ # Set quant_method for ModelOpt models.
865
+ producer_name = quant_cfg.get("producer", {}).get("name")
866
+ if producer_name == "modelopt":
867
+ quant_algo = quant_cfg.get("quantization",
868
+ {}).get("quant_algo")
869
+ if quant_algo == "FP8":
870
+ quant_cfg["quant_method"] = "modelopt"
871
+ elif quant_algo == "NVFP4":
872
+ quant_cfg["quant_method"] = "modelopt_fp4"
873
+ elif quant_algo is not None:
874
+ raise ValueError(
875
+ f"Unknown ModelOpt quant algo: {quant_algo}")
876
+
877
+ return quant_cfg
878
+
879
+ def _verify_quantization(self) -> None:
880
+ supported_quantization = me_quant.QUANTIZATION_METHODS
881
+ if self.quantization is not None:
882
+ self.quantization = cast(me_quant.QuantizationMethods,
883
+ self.quantization)
884
+
885
+ # Parse quantization method from the HF model config, if available.
886
+ quant_cfg = self._parse_quant_hf_config(self.hf_config)
887
+ if quant_cfg is None and (text_config := getattr(
888
+ self.hf_config, "text_config", None)):
889
+ # Check the text config as well for multi-modal models.
890
+ quant_cfg = self._parse_quant_hf_config(text_config)
891
+
892
+ if quant_cfg is not None:
893
+ # Use the community standard 'quant_method'
894
+ quant_method = quant_cfg.get("quant_method", "").lower()
895
+
896
+ # Normalize library names
897
+ quant_method = quant_method.replace("compressed_tensors",
898
+ "compressed-tensors")
899
+
900
+ quant_cfg["quant_method"] = quant_method
901
+
902
+ # Quantization methods which are overrides (i.e. they have a
903
+ # `override_quantization_method` method) must be checked in order
904
+ # of preference (this is particularly important for GPTQ).
905
+ overrides = [
906
+ "bitblas",
907
+ "gptq_marlin_24",
908
+ "gptq_marlin",
909
+ "gptq_bitblas",
910
+ "awq_marlin",
911
+ "ipex",
912
+ "moe_wna16",
913
+ "modelopt",
914
+ "modelopt_fp4",
915
+ "petit_nvfp4",
916
+ # Ensure heavy backends are probed last to avoid unnecessary
917
+ # imports during override detection (e.g., MXFP4 imports Triton)
918
+ "mxfp4",
919
+ ]
920
+ quantization_methods = [
921
+ q for q in supported_quantization if q not in overrides
922
+ ]
923
+ # Any custom overrides are already in quantization_methods, and keeping
924
+ # that list first in the concatenation gives custom overrides preference
925
+ # over the built-in ones.
926
+ quantization_methods = quantization_methods + overrides
927
+
928
+ # Detect which checkpoint it is
929
+ for name in quantization_methods:
930
+ method = me_quant.get_quantization_config(name)
931
+ quantization_override = method.override_quantization_method(
932
+ quant_cfg, self.quantization)
933
+ if quantization_override is not None:
934
+ # Raise error if the override is not custom (custom would
935
+ # be in QUANTIZATION_METHODS but not QuantizationMethods)
936
+ # and hasn't been added to the overrides list.
937
+ if (name in get_args(me_quant.QuantizationMethods)
938
+ and name not in overrides):
939
+ raise ValueError(
940
+ f"Quantization method {name} is an override but "
941
+ "has not been added to the `overrides` list "
942
+ "above. This is necessary to ensure that the "
943
+ "overrides are checked in order of preference.")
944
+ quant_method = quantization_override
945
+ self.quantization = quantization_override
946
+ break
947
+
948
+ quant_method = quant_method if quant_method != "" else None
949
+ # Verify quantization configurations.
950
+ if self.quantization is None:
951
+ self.quantization = quant_method
952
+ elif self.quantization != quant_method:
953
+ raise ValueError(
954
+ "Quantization method specified in the model config "
955
+ f"({quant_method}) does not match the quantization "
956
+ f"method specified in the `quantization` argument "
957
+ f"({self.quantization}).")
958
+
959
+ if self.quantization is not None:
960
+ if self.quantization not in supported_quantization:
961
+ raise ValueError(
962
+ f"Unknown quantization method: {self.quantization}. Must "
963
+ f"be one of {supported_quantization}.")
964
+ from vllm.platforms import current_platform
965
+ current_platform.verify_quantization(self.quantization)
966
+
967
+ def _verify_cuda_graph(self) -> None:
968
+ # CUDAGraph capture not supported for encoder-decoder models on ROCm
969
+ unsupported_rocm = self.is_encoder_decoder
970
+ if (unsupported_rocm and not self.enforce_eager
971
+ and current_platform.is_rocm()):
972
+ "CUDA graph is not supported for %s on ROCm yet; falling back "
973
+ "CUDA graph is not supported for %s on ROCm yet, fallback "
974
+ "to eager mode.", self.hf_config.model_type)
975
+ self.enforce_eager = True
976
+
977
+ def _verify_bnb_config(self) -> None:
978
+ """
979
+ The current version of bitsandbytes (0.46.1) with 8-bit models does not
980
+ yet support CUDA graph.
981
+ # TODO: Remove this when bitsandbytes supports CUDA graphs with 8-bit models.
982
+ """
983
+ is_bitsandbytes = self.quantization == "bitsandbytes"
984
+ has_quantization_config = (getattr(self.hf_config,
985
+ "quantization_config", None)
986
+ is not None)
987
+ is_8bit = (self.hf_config.quantization_config.get(
988
+ "load_in_8bit", False) if has_quantization_config else False)
989
+ if all([
990
+ is_bitsandbytes,
991
+ has_quantization_config,
992
+ is_8bit,
993
+ not self.enforce_eager,
994
+ ]):
995
+ logger.warning(
996
+ "CUDA graph is not supported on BitsAndBytes 8bit yet, "
997
+ "falling back to eager mode.")
998
+
999
+ self.enforce_eager = True
1000
+
1001
+ def _verify_with_expert_parallelism(self) -> None:
1002
+ num_expert_names = [
1003
+ "moe_num_experts", # Dbrx
1004
+ "num_experts", # Jamba
1005
+ "n_routed_experts", # DeepSeek
1006
+ "num_local_experts", # Mixtral
1007
+ ]
1008
+ num_experts = 0
1009
+ for name in num_expert_names:
1010
+ num_experts = getattr(self.hf_text_config, name, 0)
1011
+ if num_experts > 0:
1012
+ break
1013
+ if num_experts < 1:
1014
+ raise ValueError(
1015
+ "Number of experts in the model must be greater than 0 "
1016
+ "when expert parallelism is enabled.")
1017
+
1018
+ def verify_dual_chunk_attention_config(
1019
+ self,
1020
+ load_config: LoadConfig,
1021
+ ) -> None:
1022
+ if hasattr(self.hf_config, "dual_chunk_attention_config"):
1023
+ # Try loading the sparse attention config
1024
+ from vllm.model_executor.model_loader.weight_utils import (
1025
+ get_sparse_attention_config)
1026
+ sparse_attn_config = get_sparse_attention_config(self, load_config)
1027
+ if sparse_attn_config:
1028
+ self.hf_config.dual_chunk_attention_config[
1029
+ "sparse_attention_config"] = sparse_attn_config
1030
+ if "sparse_attention_enabled" not in \
1031
+ self.hf_config.dual_chunk_attention_config:
1032
+ self.hf_config.dual_chunk_attention_config[
1033
+ "sparse_attention_enabled"] = True
1034
+
1035
+ def verify_with_parallel_config(
1036
+ self,
1037
+ parallel_config: ParallelConfig,
1038
+ ) -> None:
1039
+
1040
+ if parallel_config.distributed_executor_backend == "external_launcher":
1041
+ assert self.seed is not None, (
1042
+ "Seed must be set when using external launcher backend to "
1043
+ "make sure sampling results are the same across workers.")
1044
+
1045
+ total_num_attention_heads = getattr(self.hf_text_config,
1046
+ "num_attention_heads", 0)
1047
+ tensor_parallel_size = parallel_config.tensor_parallel_size
1048
+ if total_num_attention_heads % tensor_parallel_size != 0:
1049
+ raise ValueError(
1050
+ f"Total number of attention heads ({total_num_attention_heads})"
1051
+ " must be divisible by tensor parallel size "
1052
+ f"({tensor_parallel_size}).")
1053
+
1054
+ if parallel_config.enable_expert_parallel:
1055
+ self._verify_with_expert_parallelism()
1056
+
1057
+ pipeline_parallel_size = parallel_config.pipeline_parallel_size
1058
+ if (pipeline_parallel_size > 1
1059
+ and not self.registry.is_pp_supported_model(
1060
+ self.architectures, self)):
1061
+ raise NotImplementedError(
1062
+ "Pipeline parallelism is not supported for this model. "
1063
+ "Supported models implement the `SupportsPP` interface.")
1064
+
1065
+ def get_sliding_window(self) -> Optional[int]:
1066
+ """Get the sliding window size from the HF text config if present."""
1067
+ return getattr(self.hf_text_config, "sliding_window", None)
1068
+
1069
+ def get_vocab_size(self) -> int:
1070
+ return getattr(self.hf_text_config, "vocab_size", 0)
1071
+
1072
+ def get_hidden_size(self) -> int:
1073
+ return getattr(self.hf_text_config, "hidden_size", 0)
1074
+
1075
+ @property
1076
+ def is_deepseek_mla(self) -> bool:
1077
+ if not hasattr(self.hf_text_config, "model_type"):
1078
+ return False
1079
+ elif self.hf_text_config.model_type in \
1080
+ ('deepseek_v2', 'deepseek_v3', 'deepseek_v32', 'deepseek_mtp',
1081
+ 'kimi_k2', 'longcat_flash'):
1082
+ return self.hf_text_config.kv_lora_rank is not None
1083
+ elif self.hf_text_config.model_type == 'eagle':
1084
+ # if the model is an EAGLE module, check for the
1085
+ # underlying architecture
1086
+ return self.hf_text_config.model.model_type in \
1087
+ ('deepseek_v2', 'deepseek_v3', 'deepseek_v32') \
1088
+ and self.hf_text_config.kv_lora_rank is not None
1089
+ return False
1090
+
1091
+ def get_head_size(self) -> int:
1092
+ # TODO remove hard code
1093
+ if self.is_deepseek_mla:
1094
+ qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
1095
+ 0)
1096
+ if self.use_mla:
1097
+ return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
1098
+ else:
1099
+ qk_nope_head_dim = getattr(self.hf_text_config,
1100
+ "qk_nope_head_dim", 0)
1101
+ if qk_rope_head_dim and qk_nope_head_dim:
1102
+ return qk_rope_head_dim + qk_nope_head_dim
1103
+
1104
+ if hasattr(self.hf_text_config,
1105
+ "model_type") and (self.hf_text_config.model_type
1106
+ == "zamba2"):
1107
+ return self.hf_text_config.attention_head_dim
1108
+
1109
+ if self.is_attention_free:
1110
+ return 0
1111
+
1112
+ # NOTE: Some configs may set head_dim=None in the config
1113
+ if getattr(self.hf_text_config, "head_dim", None) is not None:
1114
+ return self.hf_text_config.head_dim
1115
+
1116
+ # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
1117
+ if getattr(self.hf_text_config, "hidden_size_per_head",
1118
+ None) is not None:
1119
+ return self.hf_text_config.hidden_size_per_head
1120
+
1121
+ # FIXME(woosuk): This may not be true for all models.
1122
+ return (self.hf_text_config.hidden_size //
1123
+ self.hf_text_config.num_attention_heads)
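# Example of the final fallback above: hidden_size=4096 with
# num_attention_heads=32 gives a head size of 4096 // 32 == 128.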
1124
+
1125
+ def get_total_num_kv_heads(self) -> int:
1126
+ """Returns the total number of KV heads."""
1127
+ # For GPTBigCode & Falcon:
1128
+ # NOTE: for falcon, when new_decoder_architecture is True, the
1129
+ # multi_query flag is ignored and we use n_head_kv for the number of
1130
+ # KV heads.
1131
+ falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
1132
+ new_decoder_arch_falcon = (
1133
+ self.hf_config.model_type in falcon_model_types
1134
+ and getattr(self.hf_config, "new_decoder_architecture", False))
1135
+ if not new_decoder_arch_falcon and getattr(self.hf_text_config,
1136
+ "multi_query", False):
1137
+ # Multi-query attention, only one KV head.
1138
+ # Currently, tensor parallelism is not supported in this case.
1139
+ return 1
1140
+
1141
+ # For DBRX and MPT
1142
+ if self.hf_config.model_type == "mpt":
1143
+ if "kv_n_heads" in self.hf_config.attn_config:
1144
+ return self.hf_config.attn_config["kv_n_heads"]
1145
+ return self.hf_config.num_attention_heads
1146
+ if self.hf_config.model_type == "dbrx":
1147
+ return getattr(self.hf_config.attn_config, "kv_n_heads",
1148
+ self.hf_config.num_attention_heads)
1149
+
1150
+ if self.hf_config.model_type == "nemotron-nas":
1151
+ for block in self.hf_config.block_configs:
1152
+ if not block.attention.no_op:
1153
+ return self.hf_config.num_attention_heads \
1154
+ // block.attention.n_heads_in_group
1155
+
1156
+ raise RuntimeError("Couldn't determine number of kv heads")
1157
+
1158
+ if self.is_attention_free:
1159
+ return 0
1160
+
1161
+ attributes = [
1162
+ # For Falcon:
1163
+ "n_head_kv",
1164
+ "num_kv_heads",
1165
+ # For LLaMA-2:
1166
+ "num_key_value_heads",
1167
+ # For ChatGLM:
1168
+ "multi_query_group_num",
1169
+ ]
1170
+ for attr in attributes:
1171
+ num_kv_heads = getattr(self.hf_text_config, attr, None)
1172
+ if num_kv_heads is not None:
1173
+ return num_kv_heads
1174
+
1175
+ # For non-grouped-query attention models, the number of KV heads is
1176
+ # equal to the number of attention heads.
1177
+ return self.hf_text_config.num_attention_heads
1178
+
1179
+ def get_num_kv_heads(self, parallel_config: ParallelConfig) -> int:
1180
+ """Returns the number of KV heads per GPU."""
1181
+ if self.use_mla:
1182
+ # When using MLA during decode it becomes MQA
1183
+ return 1
1184
+
1185
+ total_num_kv_heads = self.get_total_num_kv_heads()
1186
+ # If tensor parallelism is used, we divide the number of KV heads by
1187
+ # the tensor parallel size. We will replicate the KV heads in the
1188
+ # case where the number of KV heads is smaller than the tensor
1189
+ # parallel size so each GPU has at least one KV head.
1190
+ return max(1,
1191
+ total_num_kv_heads // parallel_config.tensor_parallel_size)
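# Worked example of the replication rule above: 8 total KV heads with
# tensor_parallel_size=16 gives max(1, 8 // 16) == 1 KV head per GPU
# (heads are replicated); with tensor_parallel_size=4 each GPU gets 8 // 4 == 2.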
1192
+
1193
+ def get_num_attention_heads(self, parallel_config: ParallelConfig) -> int:
1194
+ num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
1195
+ return num_heads // parallel_config.tensor_parallel_size
1196
+
1197
+ def get_layers_start_end_indices(
1198
+ self, parallel_config: ParallelConfig) -> tuple[int, int]:
1199
+ from vllm.distributed.utils import get_pp_indices
1200
+ if (self.hf_text_config.model_type == "deepseek_mtp"
1201
+ or self.hf_config.model_type == "mimo_mtp"
1202
+ or self.hf_config.model_type == "glm4_moe_mtp"
1203
+ or self.hf_config.model_type == "ernie_mtp"
1204
+ or self.hf_config.model_type == "qwen3_next_mtp"):
1205
+ total_num_hidden_layers = getattr(self.hf_text_config,
1206
+ "num_nextn_predict_layers", 0)
1207
+ elif (self.hf_config.model_type == "longcat_flash_mtp"):
1208
+ total_num_hidden_layers = getattr(self.hf_text_config,
1209
+ "num_nextn_predict_layers", 1)
1210
+ else:
1211
+ total_num_hidden_layers = getattr(self.hf_text_config,
1212
+ "num_hidden_layers", 0)
1213
+ # the layout order is: DP x PP x TP
1214
+ pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
1215
+ ) % parallel_config.pipeline_parallel_size
1216
+ pp_size = parallel_config.pipeline_parallel_size
1217
+ start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
1218
+ return start, end
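# Worked example of the DP x PP x TP layout above: with tensor_parallel_size=2
# and pipeline_parallel_size=2, global rank 2 maps to pp_rank = (2 // 2) % 2 == 1,
# so it receives the second pipeline stage's layer slice from get_pp_indices.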
1219
+
1220
+ def get_num_layers(self, parallel_config: ParallelConfig) -> int:
1221
+ start, end = self.get_layers_start_end_indices(parallel_config)
1222
+ return end - start
1223
+
1224
+ def get_num_layers_by_block_type(
1225
+ self,
1226
+ parallel_config: ParallelConfig,
1227
+ block_type: LayerBlockType = LayerBlockType.attention,
1228
+ ) -> int:
1229
+ # This function relies on 'layers_block_type' in hf_config; for models
1230
+ # without this attribute, we need the workarounds below
1231
+ attn_block_type = block_type == LayerBlockType.attention
1232
+ is_transformer = not self.is_hybrid and \
1233
+ not self.has_noops and \
1234
+ not self.is_attention_free
1235
+ start, end = self.get_layers_start_end_indices(parallel_config)
1236
+
1237
+ if is_transformer:
1238
+ # Handle the basic case first
1239
+ return end - start if attn_block_type else 0
1240
+ elif self.is_attention_free:
1241
+ # Attention free
1242
+ # Note that this code assumes there
1243
+ # is only one type of attention-free block type.
1244
+ return 0 if attn_block_type else end - start
1245
+ elif self.has_noops:
1246
+ block_configs = self.hf_config.block_configs
1247
+ return sum(not bc.attention.no_op
1248
+ for bc in block_configs[start:end])
1249
+ else:
1250
+ # Hybrid model Jamba
1251
+ layers_block_type_value = getattr(self.hf_text_config,
1252
+ "layers_block_type", None)
1253
+ if layers_block_type_value is not None:
1254
+ if hasattr(self.hf_text_config,
1255
+ "model_type") and (self.hf_text_config.model_type
1256
+ == "zamba2"):
1257
+ if attn_block_type:
1258
+ return sum(t == "hybrid"
1259
+ for t in layers_block_type_value[start:end])
1260
+ else:
1261
+ return self.get_num_layers(parallel_config)
1262
+ return sum(t == block_type.value
1263
+ for t in layers_block_type_value[start:end])
1264
+
1265
+ # Hybrid model Minimax
1266
+ attn_type_list = getattr(self.hf_config, "attn_type_list", None)
1267
+ if attn_type_list:
1268
+ return sum(t == 1 for t in attn_type_list[start:end])
1269
+
1270
+ # Hybrid model Qwen3Next
1271
+ layer_types_value = getattr(self.hf_config, "layer_types", None)
1272
+ if layer_types_value is not None:
1273
+ if getattr(block_type, "value", block_type) == "attention":
1274
+ return sum(t == "full_attention"
1275
+ for t in layer_types_value[start:end])
1276
+ elif getattr(block_type, "value",
1277
+ block_type) == "linear_attention":
1278
+ return sum(t == "linear_attention"
1279
+ for t in layer_types_value[start:end])
1280
+ else:
1281
+ return sum(t == getattr(block_type, "value", block_type)
1282
+ for t in layer_types_value[start:end])
1283
+
1284
+ if (layers_block_type_value is None and attn_type_list is None
1285
+ and layer_types_value is None):
1286
+ raise ValueError(
1287
+ "The model is a hybrid without a "
1288
+ "layers_block_type, an attn_type_list, or a layer_types "
1289
+ "attribute in the hf_config; cannot determine the number of "
1290
+ f"{block_type.value} layers")
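# Example of the Jamba-style branch above: with a PP-local slice of
# layers_block_type == ["attention", "mamba", "attention"], requesting the
# attention block type counts the two "attention" entries and returns 2.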
1291
+
1292
+ def get_mamba_chunk_size(self) -> Optional[int]:
1293
+ """
1294
+ Returns the mamba chunk size if it exists
1295
+ """
1296
+ # used by e.g. Bamba, FalconH1, Granite, PLaMo2
1297
+ chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
1298
+ if chunk_size is None:
1299
+ # used by e.g. Mamba2, NemotronH, Zamba
1300
+ chunk_size = getattr(self.hf_text_config, "chunk_size", None)
1301
+ return chunk_size
1302
+
1303
+ def get_multimodal_config(self) -> MultiModalConfig:
1304
+ """
1305
+ Get the multimodal configuration of the model.
1306
+
1307
+ Raises:
1308
+ ValueError: If the model is not multimodal.
1309
+ """
1310
+ if self.multimodal_config is None:
1311
+ raise ValueError("The model is not multimodal.")
1312
+
1313
+ return self.multimodal_config
1314
+
1315
+ def try_get_generation_config(self) -> dict[str, Any]:
1316
+ """
1317
+ This method attempts to retrieve the non-default values of the
1318
+ generation config for this model.
1319
+
1320
+ The generation config can contain information about special tokens, as
1321
+ well as sampling parameters, which is why this method exists separately
1322
+ from `get_diff_sampling_param`.
1323
+
1324
+ Returns:
1325
+ A dictionary containing the non-default generation config.
1326
+ """
1327
+ if self.generation_config in {"auto", "vllm"}:
1328
+ config = try_get_generation_config(
1329
+ self.hf_config_path or self.model,
1330
+ trust_remote_code=self.trust_remote_code,
1331
+ revision=self.revision,
1332
+ )
1333
+ else:
1334
+ config = try_get_generation_config(
1335
+ self.generation_config,
1336
+ trust_remote_code=self.trust_remote_code,
1337
+ )
1338
+
1339
+ if config is None:
1340
+ return {}
1341
+
1342
+ return config.to_diff_dict()
1343
+
1344
+     def get_diff_sampling_param(self) -> dict[str, Any]:
+         """
+         This method returns a dictionary containing the non-default sampling
+         parameters with `override_generation_config` applied.
+
+         The default sampling parameters are:
+
+         - vLLM's neutral defaults if `self.generation_config="vllm"`
+         - the model's defaults if `self.generation_config="auto"`
+         - as defined in `generation_config.json` if
+           `self.generation_config="path/to/generation_config/dir"`
+
+         Returns:
+             A dictionary containing the non-default sampling parameters.
+         """
+         if self.generation_config == "vllm":
+             config = {}
+         else:
+             config = self.try_get_generation_config()
+
+         # Override with the given generation config
+         config.update(self.override_generation_config)
+
+         available_params = [
+             "repetition_penalty",
+             "temperature",
+             "top_k",
+             "top_p",
+             "min_p",
+             "max_new_tokens",
+         ]
+         if any(p in config for p in available_params):
+             diff_sampling_param = {
+                 p: config.get(p)
+                 for p in available_params if config.get(p) is not None
+             }
+             # The Hugging Face definition of max_new_tokens is equivalent
+             # to vLLM's max_tokens
+             if "max_new_tokens" in diff_sampling_param:
+                 diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
+                     "max_new_tokens")
+         else:
+             diff_sampling_param = {}
+
+         if diff_sampling_param:
+             logger.warning_once(
+                 "Default sampling parameters have been overridden by the "
+                 "model's Hugging Face generation config, as recommended by "
+                 "the model creator. If this is not intended, please relaunch "
+                 "the vLLM instance with `--generation-config vllm`.")
+         return diff_sampling_param
+
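As an aside, the parameter filtering and the `max_new_tokens` → `max_tokens` rename performed above can be illustrated with a small standalone sketch (plain Python, not the vLLM API; the config values are hypothetical):

# Illustrative only: a hypothetical generation_config.json diff.
generation_config = {
    "temperature": 0.6,
    "top_p": 0.9,
    "max_new_tokens": 256,
    "eos_token_id": 2,  # not a sampling parameter, so it is dropped
}

available_params = [
    "repetition_penalty", "temperature", "top_k", "top_p", "min_p",
    "max_new_tokens",
]
diff_sampling_param = {
    p: generation_config[p]
    for p in available_params if generation_config.get(p) is not None
}
# Hugging Face's max_new_tokens corresponds to vLLM's max_tokens.
if "max_new_tokens" in diff_sampling_param:
    diff_sampling_param["max_tokens"] = diff_sampling_param.pop("max_new_tokens")

print(diff_sampling_param)  # {'temperature': 0.6, 'top_p': 0.9, 'max_tokens': 256}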
+     @property
+     def is_encoder_decoder(self) -> bool:
+         """Extract the HF encoder/decoder model flag."""
+         return is_encoder_decoder(self.hf_config)
+
+     @property
+     def uses_mrope(self) -> bool:
+         return uses_mrope(self.hf_config)
+
+     @property
+     def is_multimodal_model(self) -> bool:
+         return self.multimodal_config is not None
+
+     @property
+     def is_multimodal_raw_input_only_model(self) -> bool:
+         return self._model_info.supports_multimodal_raw_input_only
+
+     @property
+     def is_cross_encoder(self) -> bool:
+         return (self._model_info.supports_cross_encoding
+                 or self.convert_type == "classify")
+
+     @property
+     def is_pp_supported(self) -> bool:
+         return self._model_info.supports_pp
+
+     @property
+     def is_attention_free(self) -> bool:
+         return self._model_info.is_attention_free
+
+     @property
+     def is_hybrid(self) -> bool:
+         return self._model_info.is_hybrid
+
+     @property
+     def has_noops(self) -> bool:
+         return self._model_info.has_noops
+
+     @property
+     def has_inner_state(self):
+         return self._model_info.has_inner_state
+
+     @property
+     def is_v1_compatible(self) -> bool:
+         return not self._model_info.supports_v0_only
+
+     @property
+     def use_mla(self) -> bool:
+         return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
+
+     @property
+     def is_matryoshka(self) -> bool:
+         return (bool(getattr(self.hf_config, "matryoshka_dimensions", None))
+                 or getattr(self.hf_config, "is_matryoshka", False))
+
+     @property
+     def matryoshka_dimensions(self):
+         return getattr(self.hf_config, "matryoshka_dimensions", None)
+
+     @property
+     def use_pad_token(self) -> bool:
+         # cross_encoder models default to using pad_token.
+         # `llm as reranker` models default to not using pad_token.
+         return getattr(self.hf_config, "use_pad_token", True)
+
+     @property
+     def head_dtype(self) -> torch.dtype:
+         """
+         "head" refers to the last Linear layer(s) of an LLM,
+         such as the lm_head in a generation model,
+         or the score or classifier in a classification model.
+
+         `head_dtype` currently only supports pooling models.\n
+         - Pooling models default to using an fp32 head; you can use
+           --hf-overrides '{"head_dtype": "model"}' to disable it.
+         """
+
+         head_dtype = _get_head_dtype(config=self.hf_config,
+                                      dtype=self.dtype,
+                                      runner_type=self.runner_type)
+
+         if self.runner_type != "pooling" and head_dtype != self.dtype:
+             logger.warning_once(
+                 "`head_dtype` currently only supports pooling models; "
+                 "falling back to model dtype [%s].", self.dtype)
+             return self.dtype
+
+         if head_dtype not in current_platform.supported_dtypes:
+             logger.warning_once(
+                 "The current platform does not support [%s] head dtype; "
+                 "falling back to model dtype [%s].", head_dtype, self.dtype)
+             return self.dtype
+
+         logger.debug_once("head dtype: %s", head_dtype)
+         return head_dtype
+
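The `--hf-overrides` escape hatch mentioned in the docstring can also be passed through the offline Python API; a minimal sketch, assuming the `LLM` entrypoint's `hf_overrides` parameter and using a placeholder embedding model name:

from vllm import LLM

# Keep the pooling head in the model dtype instead of the default fp32 head.
# "BAAI/bge-m3" is only a placeholder model name for illustration.
llm = LLM(model="BAAI/bge-m3", hf_overrides={"head_dtype": "model"})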
+     def get_and_verify_max_len(self, max_model_len: int):
+         # Consider max_model_len in tokenizer_config only when
+         # pooling models use absolute position_embedding.
+         tokenizer_config = None
+         if (self.runner_type == "pooling" and getattr(
+                 self.hf_config, "position_embedding_type", "") == "absolute"):
+             tokenizer_config = try_get_tokenizer_config(
+                 self.tokenizer,
+                 trust_remote_code=self.trust_remote_code,
+                 revision=self.tokenizer_revision)
+         max_model_len = _get_and_verify_max_len(
+             hf_config=self.hf_text_config,
+             tokenizer_config=tokenizer_config,
+             max_model_len=max_model_len,
+             disable_sliding_window=self.disable_sliding_window,
+             sliding_window=self.get_sliding_window(),
+             spec_target_max_model_len=self.spec_target_max_model_len,
+             encoder_config=self.encoder_config)
+         logger.info("Using max model len %s", max_model_len)
+         return max_model_len
+
+
+ def get_served_model_name(model: str,
+                           served_model_name: Optional[Union[str, list[str]]]):
+     """
+     If the input is a non-empty list, the first model_name in
+     `served_model_name` is taken.
+     If the input is a non-empty string, it is used directly.
+     For cases where the input is either an empty string or an
+     empty list, the fallback is to use `model`.
+     """
+     if not served_model_name:
+         return model
+     if isinstance(served_model_name, list):
+         return served_model_name[0]
+     return served_model_name
+
+
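A quick usage sketch of the fallback rules above (the model names are placeholders):

# None, "" and [] all fall back to the original model identifier.
assert get_served_model_name("meta-llama/Llama-3.1-8B", None) == "meta-llama/Llama-3.1-8B"
assert get_served_model_name("meta-llama/Llama-3.1-8B", []) == "meta-llama/Llama-3.1-8B"
# A non-empty string is used directly; a non-empty list contributes its first entry.
assert get_served_model_name("meta-llama/Llama-3.1-8B", "llama-8b") == "llama-8b"
assert get_served_model_name("meta-llama/Llama-3.1-8B", ["llama-8b", "llama"]) == "llama-8b"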
+ # Some model suffixes are based on auto classes from Transformers:
+ # https://huggingface.co/docs/transformers/en/model_doc/auto
+ # NOTE: Items higher on this list take priority over lower ones
+ _SUFFIX_TO_DEFAULTS: list[tuple[str, tuple[RunnerType, ConvertType]]] = [
+     ("ForCausalLM", ("generate", "none")),
+     ("ForConditionalGeneration", ("generate", "none")),
+     ("ChatModel", ("generate", "none")),
+     ("LMHeadModel", ("generate", "none")),
+     ("ForTextEncoding", ("pooling", "embed")),
+     ("EmbeddingModel", ("pooling", "embed")),
+     ("ForSequenceClassification", ("pooling", "classify")),
+     ("ForAudioClassification", ("pooling", "classify")),
+     ("ForImageClassification", ("pooling", "classify")),
+     ("ForVideoClassification", ("pooling", "classify")),
+     ("ClassificationModel", ("pooling", "classify")),
+     ("ForRewardModeling", ("pooling", "reward")),
+     ("RewardModel", ("pooling", "reward")),
+     # Let other `*Model`s take priority
+     ("Model", ("pooling", "embed")),
+ ]
+
+
+ def iter_architecture_defaults():
+     yield from _SUFFIX_TO_DEFAULTS
+
+
+ def try_match_architecture_defaults(
+     architecture: str,
+     *,
+     runner_type: Optional[RunnerType] = None,
+     convert_type: Optional[ConvertType] = None,
+ ) -> Optional[tuple[str, tuple[RunnerType, ConvertType]]]:
+     for suffix, (default_runner_type,
+                  default_convert_type) in iter_architecture_defaults():
+         if ((runner_type is None or runner_type == default_runner_type) and
+                 (convert_type is None or convert_type == default_convert_type)
+                 and architecture.endswith(suffix)):
+             return suffix, (default_runner_type, default_convert_type)
+
+     return None
+
+
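Matching is by architecture-name suffix, in the priority order of the table above; a short sketch using the two functions just defined (the architecture names are illustrative):

assert try_match_architecture_defaults("LlamaForCausalLM") == \
    ("ForCausalLM", ("generate", "none"))
assert try_match_architecture_defaults("BertForSequenceClassification") == \
    ("ForSequenceClassification", ("pooling", "classify"))
# The bare "Model" suffix is the lowest-priority fallback.
assert try_match_architecture_defaults("BertModel") == \
    ("Model", ("pooling", "embed"))
# Restricting the runner type filters out defaults that do not match it.
assert try_match_architecture_defaults("LlamaForCausalLM",
                                       runner_type="pooling") is None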
+ _STR_DTYPE_TO_TORCH_DTYPE = {
+     "half": torch.float16,
+     "float16": torch.float16,
+     "float": torch.float32,
+     "float32": torch.float32,
+     "bfloat16": torch.bfloat16,
+ }
+
+ # model_type -> reason
+ _FLOAT16_NOT_SUPPORTED_MODELS = {
+     "gemma2": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "gemma3": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "gemma3_text":
+     "Numerical instability. Please use bfloat16 or float32 instead.",
+     "plamo2": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "glm4": "Numerical instability. Please use bfloat16 or float32 instead.",
+ }
+
+
+ def _is_valid_dtype(model_type: str, dtype: torch.dtype):
+     if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:  # noqa: E501, SIM103
+         return False
+
+     return True
+
+
+ def _check_valid_dtype(model_type: str, dtype: torch.dtype):
+     if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:
+         reason = _FLOAT16_NOT_SUPPORTED_MODELS[model_type]
+         raise ValueError(f"The model type {model_type!r} "
+                          f"does not support float16. Reason: {reason}")
+
+     return True
+
+
+ def _find_dtype(
+     model_id: str,
+     config: PretrainedConfig,
+     *,
+     revision: Optional[str],
+ ):
+     # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
+     # because config.torch_dtype can be None.
+     config_dtype = getattr(config, "torch_dtype", None)
+
+     # Fallbacks for multi-modal models if the root config
+     # does not define torch_dtype
+     if config_dtype is None:
+         config_dtype = getattr(config.get_text_config(), "torch_dtype", None)
+     if config_dtype is None and hasattr(config, "vision_config"):
+         config_dtype = getattr(config.vision_config, "torch_dtype", None)
+     if config_dtype is None and hasattr(config, "encoder_config"):
+         config_dtype = getattr(config.encoder_config, "torch_dtype", None)
+
+     # Try to read the dtype of the weights if they are in safetensors format
+     if config_dtype is None:
+         repo_mt = try_get_safetensors_metadata(model_id, revision=revision)
+
+         if repo_mt and (files_mt := repo_mt.files_metadata):
+             param_dtypes: set[torch.dtype] = {
+                 _SAFETENSORS_TO_TORCH_DTYPE[dtype_str]
+                 for file_mt in files_mt.values()
+                 for dtype_str in file_mt.parameter_count
+                 if dtype_str in _SAFETENSORS_TO_TORCH_DTYPE
+             }
+
+             if param_dtypes:
+                 return common_broadcastable_dtype(param_dtypes)
+
+     if config_dtype is None:
+         config_dtype = torch.float32
+
+     return config_dtype
+
+
+ def _resolve_auto_dtype(
+     model_type: str,
+     config_dtype: torch.dtype,
+     *,
+     is_pooling_model: bool,
+ ):
+     from vllm.platforms import current_platform
+
+     supported_dtypes = [
+         dtype for dtype in current_platform.supported_dtypes
+         if _is_valid_dtype(model_type, dtype)
+     ]
+
+     if is_pooling_model and torch.float16 in supported_dtypes:
+         preferred_dtype = torch.float16
+     else:
+         preferred_dtype = supported_dtypes[0]
+
+     # Downcast for float32 models
+     if config_dtype == torch.float32:
+         config_dtype = preferred_dtype
+
+     if config_dtype in supported_dtypes:
+         return config_dtype
+
+     # Ensure device compatibility
+     device_name = current_platform.get_device_name()
+     device_capability = current_platform.get_device_capability()
+
+     if device_capability is None:
+         device_str = f"{device_name!r}"
+     else:
+         version_str = device_capability.as_version_str()
+         device_str = f"{device_name!r} (with compute capability {version_str})"
+
+     logger.warning(
+         "Your device %s doesn't support %s. "
+         "Falling back to %s for compatibility.",
+         device_str,
+         config_dtype,
+         preferred_dtype,
+     )
+
+     return preferred_dtype
+
+
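The decision rule above can be condensed into a standalone sketch (this is a re-implementation for illustration, not the vLLM function; the supported-dtype list is an assumed, preference-ordered platform value):

import torch

def sketch_resolve_auto_dtype(config_dtype, supported, preferred_pooling=False):
    # Mirror of the rule above, assuming `supported` is ordered by preference.
    preferred = (torch.float16
                 if preferred_pooling and torch.float16 in supported
                 else supported[0])
    if config_dtype == torch.float32:  # float32 checkpoints are downcast
        config_dtype = preferred
    return config_dtype if config_dtype in supported else preferred

supported = [torch.bfloat16, torch.float16, torch.float32]  # assumed platform order
assert sketch_resolve_auto_dtype(torch.float32, supported) is torch.bfloat16
assert sketch_resolve_auto_dtype(torch.float32, supported,
                                 preferred_pooling=True) is torch.float16
# A float16 checkpoint on a platform where float16 is excluded falls back
# to the preferred dtype.
assert sketch_resolve_auto_dtype(torch.float16,
                                 [torch.bfloat16, torch.float32]) is torch.bfloat16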
+ def _get_and_verify_dtype(
+     model_id: str,
+     config: PretrainedConfig,
+     dtype: Union[str, torch.dtype],
+     *,
+     is_pooling_model: bool,
+     revision: Optional[str] = None,
+ ) -> torch.dtype:
+     config_dtype = _find_dtype(model_id, config, revision=revision)
+     model_type = config.model_type
+
+     if isinstance(dtype, str):
+         dtype = dtype.lower()
+         if dtype == "auto":
+             # Set default dtype from model config
+             torch_dtype = _resolve_auto_dtype(
+                 model_type,
+                 config_dtype,
+                 is_pooling_model=is_pooling_model,
+             )
+         else:
+             if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
+                 raise ValueError(f"Unknown dtype: {dtype!r}")
+             torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
+     elif isinstance(dtype, torch.dtype):
+         torch_dtype = dtype
+     else:
+         raise ValueError(f"Unknown dtype: {dtype}")
+
+     _check_valid_dtype(model_type, torch_dtype)
+
+     if torch_dtype != config_dtype:
+         if torch_dtype == torch.float32:
+             # Upcasting to float32 is allowed.
+             logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
+         elif config_dtype == torch.float32:
+             # Downcasting from float32 to float16 or bfloat16 is allowed.
+             logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
+         else:
+             # Casting between float16 and bfloat16 is allowed with a warning.
+             logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
+
+     return torch_dtype
+
+
+ def _get_head_dtype(config: PretrainedConfig, dtype: torch.dtype,
+                     runner_type: str) -> torch.dtype:
+     head_dtype: Optional[Union[str,
+                                torch.dtype]] = getattr(config, "head_dtype",
+                                                        None)
+
+     if head_dtype == "model":
+         return dtype
+     elif isinstance(head_dtype, str):
+         head_dtype = head_dtype.lower()
+         if head_dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
+             raise ValueError(f"Unknown dtype: {head_dtype!r}")
+         return _STR_DTYPE_TO_TORCH_DTYPE[head_dtype]
+     elif isinstance(head_dtype, torch.dtype):
+         return head_dtype
+     elif head_dtype is None:
+         if torch.float32 not in current_platform.supported_dtypes:
+             return dtype
+         if runner_type == "pooling":
+             return torch.float32
+         return dtype
+     else:
+         raise ValueError(f"Unknown dtype: {head_dtype}")
+
+
+ def _get_and_verify_max_len(
+     hf_config: PretrainedConfig,
+     tokenizer_config: Optional[dict],
+     max_model_len: Optional[int],
+     disable_sliding_window: bool,
+     sliding_window: Optional[int],
+     spec_target_max_model_len: Optional[int] = None,
+     encoder_config: Optional[Any] = None,
+ ) -> int:
+     """Get and verify the model's maximum length."""
+     derived_max_model_len = float("inf")
+     possible_keys = [
+         # OPT
+         "max_position_embeddings",
+         # GPT-2
+         "n_positions",
+         # MPT
+         "max_seq_len",
+         # ChatGLM2
+         "seq_length",
+         # Command-R
+         "model_max_length",
+         # Whisper
+         "max_target_positions",
+         # Others
+         "max_sequence_length",
+         "max_seq_length",
+         "seq_len",
+     ]
+     # Choose the smallest "max_length" from the possible keys
+     max_len_key = None
+     for key in possible_keys:
+         max_len = getattr(hf_config, key, None)
+         if max_len is not None:
+             max_len_key = key if max_len < derived_max_model_len \
+                 else max_len_key
+             derived_max_model_len = min(derived_max_model_len, max_len)
+     # For Command-R / Cohere, Cohere2 / Aya Vision models
+     if tmp_max_len := getattr(hf_config, "model_max_length", None):
+         max_len_key = "model_max_length"
+         derived_max_model_len = tmp_max_len
+
+     # If sliding window is manually disabled, max_length should be less
+     # than the sliding window length in the model config.
+     if (disable_sliding_window and sliding_window is not None
+             and sliding_window < derived_max_model_len):
+         max_len_key = "sliding_window"
+         derived_max_model_len = sliding_window
+
+     # Consider model_max_length in tokenizer_config
+     if tokenizer_config:
+         tokenizer_model_max_length = tokenizer_config.get(
+             "model_max_length", derived_max_model_len)
+         derived_max_model_len = min(derived_max_model_len,
+                                     tokenizer_model_max_length)
+
+     # If none of the keys were found in the config, use a default and
+     # log a warning.
+     if derived_max_model_len == float("inf"):
+         if max_model_len is not None:
+             # If max_model_len is specified, we use it.
+             return max_model_len
+
+         if spec_target_max_model_len is not None:
+             # If this is a speculative draft model, we use the max model len
+             # from the target model.
+             return spec_target_max_model_len
+
+         default_max_len = 2048
+         logger.warning(
+             "The model's config.json does not contain any of the following "
+             "keys to determine the original maximum length of the model: "
+             "%s. Assuming the model's maximum length is %d.", possible_keys,
+             default_max_len)
+         derived_max_model_len = default_max_len
+
+     rope_scaling = getattr(hf_config, "rope_scaling", None)
+     # NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
+     # scaling, so we skip applying the scaling factor again.
+     if rope_scaling is not None and "gemma3" not in hf_config.model_type:
+         # No need to consider "type" key because of patch_rope_scaling when
+         # loading HF config
+         rope_type = rope_scaling["rope_type"]
+
+         if rope_type not in ("su", "longrope", "llama3"):
+             if disable_sliding_window:
+                 # TODO(robertgshaw): Find a model that supports rope_scaling
+                 # with sliding window to see if this case should be allowed.
+                 raise NotImplementedError(
+                     "Disabling sliding window is not supported for models "
+                     "with rope_scaling. Please raise an issue so we can "
+                     "investigate.")
+
+             # NOTE: rope_type == "default" does not define factor
+             # https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
+             scaling_factor = rope_scaling.get("factor", 1.0)
+
+             if rope_type == "yarn":
+                 derived_max_model_len = rope_scaling[
+                     "original_max_position_embeddings"]
+             derived_max_model_len *= scaling_factor
+
+     if encoder_config and "max_seq_length" in encoder_config:
+         derived_max_model_len = encoder_config["max_seq_length"]
+
+     # If the user specified a max length, make sure it is smaller than the
+     # derived length from the HF model config.
+     if max_model_len is None:
+         max_model_len = int(derived_max_model_len)
+         if current_platform.is_tpu():
+             logger.warning(
+                 "--max-model-len is not specified; currently using the "
+                 "model's default length %s, which might be too large. "
+                 "Please set --max-model-len based on your request input "
+                 "and output lengths to avoid unnecessary degradation.",
+                 max_model_len)
+     elif max_model_len > derived_max_model_len:
+         # Some models might have a separate key for specifying model_max_length
+         # that will be bigger than derived_max_model_len. We compare user input
+         # with model_max_length and allow this override when it's smaller.
+         model_max_length = getattr(hf_config, "model_max_length", None)
+         if model_max_length is not None and max_model_len <= model_max_length:
+             if disable_sliding_window:
+                 # TODO(robertgshaw): Find a model that has model_max_length
+                 # with sliding window to see if this case should be allowed.
+                 raise NotImplementedError(
+                     "Disabling sliding window is not supported for models "
+                     "with model_max_length in the config. Please raise an "
+                     "issue so we can investigate.")
+         else:
+             msg = (
+                 f"User-specified max_model_len ({max_model_len}) is greater "
+                 f"than the derived max_model_len ({max_len_key}="
+                 f"{derived_max_model_len} or model_max_length="
+                 f"{model_max_length} in model's config.json).")
+             warning = (
+                 "VLLM_ALLOW_LONG_MAX_MODEL_LEN must be used with extreme "
+                 "caution. If the model uses relative position encoding (RoPE), "
+                 "positions exceeding derived_max_model_len lead to nan. If the "
+                 "model uses absolute position encoding, positions exceeding "
+                 "derived_max_model_len will cause a CUDA array out-of-bounds "
+                 "error.")
+             if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
+                 logger.warning_once("%s %s", msg, warning)
+             else:
+                 raise ValueError(
+                     f"{msg} To allow overriding this maximum, set "
+                     f"the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1. {warning}")
+     return int(max_model_len)
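The rope-scaling branch of the length derivation above can be illustrated with a small arithmetic sketch (the config values are hypothetical):

# For rope_type == "yarn", the derived length is rebuilt from the original
# context window and then multiplied by the scaling factor; other scaled
# rope types multiply the already-derived length directly.
rope_scaling = {"rope_type": "yarn", "factor": 4.0,
                "original_max_position_embeddings": 32768}

derived_max_model_len = 131072  # e.g. max_position_embeddings from config.json
scaling_factor = rope_scaling.get("factor", 1.0)
if rope_scaling["rope_type"] == "yarn":
    derived_max_model_len = rope_scaling["original_max_position_embeddings"]
derived_max_model_len *= scaling_factor

print(derived_max_model_len)  # 131072.0 (= 32768 * 4.0)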