vllm-cpu-avx512vnni 0.10.2.post2__cp312-cp312-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
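
The listing below was produced by the registry's diff viewer, but since a wheel is an ordinary zip archive, a similar file inventory can be reproduced locally with the Python standard library. A minimal sketch, assuming the wheel has already been downloaded; the local filename and the output format are illustrative assumptions, not taken from the viewer:

    from zipfile import ZipFile

    # Assumed local filename for the wheel shown in the header above;
    # adjust the path to wherever the file was actually saved.
    WHEEL = "vllm_cpu_avx512vnni-0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

    with ZipFile(WHEEL) as wheel:
        for info in wheel.infolist():
            # Print each archive member with its uncompressed size in bytes.
            print(f"{info.filename}  {info.file_size}")

Because this release has no prior version in the registry, every file in the listing appears as entirely added lines (e.g. "+220 -0").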

Potentially problematic release: this version of vllm-cpu-avx512vnni has been flagged as potentially problematic; see the registry's advisory for details.

Files changed (1395)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2022 -0
  5. vllm/_ipex_ops.py +404 -0
  6. vllm/_version.py +34 -0
  7. vllm/adapter_commons/__init__.py +0 -0
  8. vllm/adapter_commons/layers.py +16 -0
  9. vllm/adapter_commons/models.py +106 -0
  10. vllm/adapter_commons/request.py +26 -0
  11. vllm/adapter_commons/utils.py +93 -0
  12. vllm/adapter_commons/worker_manager.py +39 -0
  13. vllm/assets/__init__.py +0 -0
  14. vllm/assets/audio.py +45 -0
  15. vllm/assets/base.py +41 -0
  16. vllm/assets/image.py +50 -0
  17. vllm/assets/video.py +138 -0
  18. vllm/attention/__init__.py +19 -0
  19. vllm/attention/backends/__init__.py +0 -0
  20. vllm/attention/backends/abstract.py +348 -0
  21. vllm/attention/backends/differential_flash_attn.py +935 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
  23. vllm/attention/backends/flash_attn.py +933 -0
  24. vllm/attention/backends/flashmla.py +238 -0
  25. vllm/attention/backends/mla/__init__.py +0 -0
  26. vllm/attention/backends/mla/common.py +1310 -0
  27. vllm/attention/backends/placeholder_attn.py +340 -0
  28. vllm/attention/backends/rocm_aiter_mla.py +410 -0
  29. vllm/attention/backends/rocm_flash_attn.py +953 -0
  30. vllm/attention/backends/triton_mla.py +111 -0
  31. vllm/attention/backends/utils.py +610 -0
  32. vllm/attention/backends/xformers.py +805 -0
  33. vllm/attention/layer.py +552 -0
  34. vllm/attention/layers/__init__.py +0 -0
  35. vllm/attention/layers/chunked_local_attention.py +91 -0
  36. vllm/attention/layers/cross_attention.py +159 -0
  37. vllm/attention/layers/encoder_only_attention.py +86 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  40. vllm/attention/ops/common.py +139 -0
  41. vllm/attention/ops/flashmla.py +123 -0
  42. vllm/attention/ops/merge_attn_states.py +43 -0
  43. vllm/attention/ops/paged_attn.py +261 -0
  44. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  45. vllm/attention/ops/prefix_prefill.py +928 -0
  46. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  47. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  48. vllm/attention/ops/triton_decode_attention.py +676 -0
  49. vllm/attention/ops/triton_flash_attention.py +984 -0
  50. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  51. vllm/attention/ops/triton_unified_attention.py +854 -0
  52. vllm/attention/selector.py +243 -0
  53. vllm/attention/utils/__init__.py +0 -0
  54. vllm/attention/utils/fa_utils.py +85 -0
  55. vllm/attention/utils/kv_sharing_utils.py +33 -0
  56. vllm/beam_search.py +87 -0
  57. vllm/benchmarks/__init__.py +0 -0
  58. vllm/benchmarks/datasets.py +2651 -0
  59. vllm/benchmarks/latency.py +170 -0
  60. vllm/benchmarks/lib/__init__.py +3 -0
  61. vllm/benchmarks/lib/endpoint_request_func.py +510 -0
  62. vllm/benchmarks/lib/ready_checker.py +72 -0
  63. vllm/benchmarks/lib/utils.py +80 -0
  64. vllm/benchmarks/serve.py +1247 -0
  65. vllm/benchmarks/throughput.py +696 -0
  66. vllm/collect_env.py +823 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/activation_quant_fusion.py +193 -0
  69. vllm/compilation/backends.py +641 -0
  70. vllm/compilation/base_static_graph.py +51 -0
  71. vllm/compilation/collective_fusion.py +1190 -0
  72. vllm/compilation/compiler_interface.py +572 -0
  73. vllm/compilation/counter.py +47 -0
  74. vllm/compilation/cuda_graph.py +193 -0
  75. vllm/compilation/cuda_piecewise_backend.py +117 -0
  76. vllm/compilation/decorators.py +316 -0
  77. vllm/compilation/fix_functionalization.py +208 -0
  78. vllm/compilation/fusion.py +600 -0
  79. vllm/compilation/fusion_attn.py +303 -0
  80. vllm/compilation/fx_utils.py +84 -0
  81. vllm/compilation/inductor_pass.py +136 -0
  82. vllm/compilation/monitor.py +57 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +165 -0
  85. vllm/compilation/pass_manager.py +88 -0
  86. vllm/compilation/sequence_parallelism.py +484 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +50 -0
  89. vllm/compilation/wrapper.py +138 -0
  90. vllm/config/__init__.py +3921 -0
  91. vllm/config/cache.py +214 -0
  92. vllm/config/compilation.py +580 -0
  93. vllm/config/kv_events.py +50 -0
  94. vllm/config/kv_transfer.py +111 -0
  95. vllm/config/load.py +113 -0
  96. vllm/config/lora.py +132 -0
  97. vllm/config/parallel.py +446 -0
  98. vllm/config/scheduler.py +304 -0
  99. vllm/config/utils.py +29 -0
  100. vllm/connections.py +174 -0
  101. vllm/core/__init__.py +0 -0
  102. vllm/core/block/__init__.py +0 -0
  103. vllm/core/block/block_table.py +399 -0
  104. vllm/core/block/common.py +371 -0
  105. vllm/core/block/cpu_gpu_block_allocator.py +439 -0
  106. vllm/core/block/interfaces.py +319 -0
  107. vllm/core/block/naive_block.py +466 -0
  108. vllm/core/block/prefix_caching_block.py +1135 -0
  109. vllm/core/block/utils.py +28 -0
  110. vllm/core/block_manager.py +523 -0
  111. vllm/core/evictor.py +157 -0
  112. vllm/core/interfaces.py +139 -0
  113. vllm/core/placeholder_block_space_manager.py +103 -0
  114. vllm/core/scheduler.py +2028 -0
  115. vllm/device_allocator/__init__.py +0 -0
  116. vllm/device_allocator/cumem.py +286 -0
  117. vllm/distributed/__init__.py +6 -0
  118. vllm/distributed/communication_op.py +41 -0
  119. vllm/distributed/device_communicators/__init__.py +0 -0
  120. vllm/distributed/device_communicators/all2all.py +259 -0
  121. vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
  122. vllm/distributed/device_communicators/base_device_communicator.py +277 -0
  123. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  124. vllm/distributed/device_communicators/cuda_communicator.py +294 -0
  125. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  126. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  127. vllm/distributed/device_communicators/pynccl.py +290 -0
  128. vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
  129. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  130. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  131. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  132. vllm/distributed/device_communicators/symm_mem.py +136 -0
  133. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  134. vllm/distributed/device_communicators/xpu_communicator.py +69 -0
  135. vllm/distributed/eplb/__init__.py +8 -0
  136. vllm/distributed/eplb/eplb_state.py +619 -0
  137. vllm/distributed/eplb/rebalance_algo.py +234 -0
  138. vllm/distributed/eplb/rebalance_execute.py +424 -0
  139. vllm/distributed/kv_events.py +362 -0
  140. vllm/distributed/kv_transfer/README.md +29 -0
  141. vllm/distributed/kv_transfer/__init__.py +13 -0
  142. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  143. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  145. vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
  146. vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  158. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  159. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  160. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  161. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  162. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  163. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  164. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  165. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  166. vllm/distributed/parallel_state.py +1489 -0
  167. vllm/distributed/tpu_distributed_utils.py +178 -0
  168. vllm/distributed/utils.py +536 -0
  169. vllm/engine/__init__.py +0 -0
  170. vllm/engine/arg_utils.py +1857 -0
  171. vllm/engine/async_llm_engine.py +1044 -0
  172. vllm/engine/async_timeout.py +173 -0
  173. vllm/engine/llm_engine.py +1849 -0
  174. vllm/engine/metrics.py +577 -0
  175. vllm/engine/metrics_types.py +84 -0
  176. vllm/engine/multiprocessing/__init__.py +145 -0
  177. vllm/engine/multiprocessing/client.py +643 -0
  178. vllm/engine/multiprocessing/engine.py +470 -0
  179. vllm/engine/output_processor/__init__.py +0 -0
  180. vllm/engine/output_processor/interfaces.py +61 -0
  181. vllm/engine/output_processor/single_step.py +145 -0
  182. vllm/engine/output_processor/stop_checker.py +131 -0
  183. vllm/engine/output_processor/util.py +28 -0
  184. vllm/engine/protocol.py +343 -0
  185. vllm/entrypoints/__init__.py +0 -0
  186. vllm/entrypoints/api_server.py +178 -0
  187. vllm/entrypoints/chat_utils.py +1535 -0
  188. vllm/entrypoints/cli/__init__.py +12 -0
  189. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  190. vllm/entrypoints/cli/benchmark/base.py +25 -0
  191. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  192. vllm/entrypoints/cli/benchmark/main.py +58 -0
  193. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  194. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  195. vllm/entrypoints/cli/collect_env.py +36 -0
  196. vllm/entrypoints/cli/main.py +60 -0
  197. vllm/entrypoints/cli/openai.py +214 -0
  198. vllm/entrypoints/cli/run_batch.py +69 -0
  199. vllm/entrypoints/cli/serve.py +232 -0
  200. vllm/entrypoints/cli/types.py +29 -0
  201. vllm/entrypoints/constants.py +10 -0
  202. vllm/entrypoints/context.py +444 -0
  203. vllm/entrypoints/harmony_utils.py +431 -0
  204. vllm/entrypoints/launcher.py +168 -0
  205. vllm/entrypoints/llm.py +1579 -0
  206. vllm/entrypoints/logger.py +79 -0
  207. vllm/entrypoints/openai/__init__.py +0 -0
  208. vllm/entrypoints/openai/api_server.py +2011 -0
  209. vllm/entrypoints/openai/cli_args.py +281 -0
  210. vllm/entrypoints/openai/logits_processors.py +90 -0
  211. vllm/entrypoints/openai/protocol.py +2590 -0
  212. vllm/entrypoints/openai/run_batch.py +497 -0
  213. vllm/entrypoints/openai/serving_chat.py +1591 -0
  214. vllm/entrypoints/openai/serving_classification.py +176 -0
  215. vllm/entrypoints/openai/serving_completion.py +688 -0
  216. vllm/entrypoints/openai/serving_embedding.py +632 -0
  217. vllm/entrypoints/openai/serving_engine.py +996 -0
  218. vllm/entrypoints/openai/serving_models.py +288 -0
  219. vllm/entrypoints/openai/serving_pooling.py +277 -0
  220. vllm/entrypoints/openai/serving_responses.py +1690 -0
  221. vllm/entrypoints/openai/serving_score.py +479 -0
  222. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  223. vllm/entrypoints/openai/serving_transcription.py +136 -0
  224. vllm/entrypoints/openai/speech_to_text.py +388 -0
  225. vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
  226. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  227. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  228. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  229. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  230. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  231. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  232. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
  233. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  234. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  235. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  236. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  237. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  238. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  239. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  240. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  241. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
  242. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  243. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  244. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  245. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  246. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  247. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  248. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  249. vllm/entrypoints/renderer.py +395 -0
  250. vllm/entrypoints/score_utils.py +232 -0
  251. vllm/entrypoints/ssl.py +75 -0
  252. vllm/entrypoints/tool.py +139 -0
  253. vllm/entrypoints/tool_server.py +195 -0
  254. vllm/entrypoints/utils.py +328 -0
  255. vllm/env_override.py +23 -0
  256. vllm/envs.py +1354 -0
  257. vllm/executor/__init__.py +0 -0
  258. vllm/executor/executor_base.py +378 -0
  259. vllm/executor/mp_distributed_executor.py +244 -0
  260. vllm/executor/msgspec_utils.py +35 -0
  261. vllm/executor/multiproc_worker_utils.py +279 -0
  262. vllm/executor/ray_distributed_executor.py +699 -0
  263. vllm/executor/ray_utils.py +410 -0
  264. vllm/executor/uniproc_executor.py +152 -0
  265. vllm/forward_context.py +273 -0
  266. vllm/inputs/__init__.py +44 -0
  267. vllm/inputs/data.py +356 -0
  268. vllm/inputs/parse.py +151 -0
  269. vllm/inputs/preprocess.py +973 -0
  270. vllm/inputs/registry.py +251 -0
  271. vllm/logger.py +229 -0
  272. vllm/logging_utils/__init__.py +8 -0
  273. vllm/logging_utils/dump_input.py +81 -0
  274. vllm/logging_utils/formatter.py +79 -0
  275. vllm/logits_process.py +119 -0
  276. vllm/logprobs.py +28 -0
  277. vllm/lora/__init__.py +0 -0
  278. vllm/lora/layers/__init__.py +34 -0
  279. vllm/lora/layers/base.py +69 -0
  280. vllm/lora/layers/base_linear.py +184 -0
  281. vllm/lora/layers/column_parallel_linear.py +622 -0
  282. vllm/lora/layers/logits_processor.py +247 -0
  283. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  284. vllm/lora/layers/replicated_linear.py +61 -0
  285. vllm/lora/layers/row_parallel_linear.py +201 -0
  286. vllm/lora/layers/utils.py +60 -0
  287. vllm/lora/layers/vocal_parallel_embedding.py +172 -0
  288. vllm/lora/lora.py +199 -0
  289. vllm/lora/models.py +792 -0
  290. vllm/lora/ops/__init__.py +0 -0
  291. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  292. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  293. vllm/lora/ops/torch_ops/__init__.py +16 -0
  294. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  295. vllm/lora/ops/triton_ops/__init__.py +12 -0
  296. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  297. vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
  298. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  299. vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
  300. vllm/lora/ops/triton_ops/utils.py +126 -0
  301. vllm/lora/ops/xla_ops/__init__.py +7 -0
  302. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  303. vllm/lora/peft_helper.py +127 -0
  304. vllm/lora/punica_wrapper/__init__.py +10 -0
  305. vllm/lora/punica_wrapper/punica_base.py +458 -0
  306. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  307. vllm/lora/punica_wrapper/punica_gpu.py +279 -0
  308. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  309. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  310. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  311. vllm/lora/punica_wrapper/utils.py +136 -0
  312. vllm/lora/request.py +99 -0
  313. vllm/lora/resolver.py +85 -0
  314. vllm/lora/utils.py +246 -0
  315. vllm/lora/worker_manager.py +256 -0
  316. vllm/model_executor/__init__.py +16 -0
  317. vllm/model_executor/custom_op.py +194 -0
  318. vllm/model_executor/layers/__init__.py +0 -0
  319. vllm/model_executor/layers/activation.py +575 -0
  320. vllm/model_executor/layers/attention_layer_base.py +23 -0
  321. vllm/model_executor/layers/fla/__init__.py +8 -0
  322. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  323. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  324. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  325. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  326. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  327. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  328. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  329. vllm/model_executor/layers/fla/ops/index.py +39 -0
  330. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  331. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  332. vllm/model_executor/layers/fla/ops/op.py +39 -0
  333. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  334. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  335. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  336. vllm/model_executor/layers/fused_moe/__init__.py +80 -0
  337. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
  338. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
  339. vllm/model_executor/layers/fused_moe/config.py +497 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  560. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
  561. vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
  562. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
  563. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  564. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
  565. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
  566. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
  567. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
  568. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
  569. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
  570. vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
  571. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
  572. vllm/model_executor/layers/fused_moe/layer.py +1951 -0
  573. vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
  574. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  575. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  576. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  577. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  578. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
  579. vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
  580. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
  581. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  582. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  583. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
  584. vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
  585. vllm/model_executor/layers/fused_moe/utils.py +270 -0
  586. vllm/model_executor/layers/layernorm.py +381 -0
  587. vllm/model_executor/layers/lightning_attn.py +661 -0
  588. vllm/model_executor/layers/linear.py +1567 -0
  589. vllm/model_executor/layers/logits_processor.py +199 -0
  590. vllm/model_executor/layers/mamba/__init__.py +0 -0
  591. vllm/model_executor/layers/mamba/abstract.py +45 -0
  592. vllm/model_executor/layers/mamba/linear_attn.py +432 -0
  593. vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
  594. vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
  595. vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
  596. vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
  597. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  598. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
  599. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  600. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  601. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  602. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
  603. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  604. vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
  605. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
  606. vllm/model_executor/layers/mamba/short_conv.py +270 -0
  607. vllm/model_executor/layers/mla.py +158 -0
  608. vllm/model_executor/layers/pooler.py +732 -0
  609. vllm/model_executor/layers/quantization/__init__.py +157 -0
  610. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  611. vllm/model_executor/layers/quantization/awq.py +228 -0
  612. vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
  613. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  614. vllm/model_executor/layers/quantization/base_config.py +164 -0
  615. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  616. vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  625. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
  626. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  627. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  628. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  629. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
  630. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  631. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  632. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
  633. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
  634. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
  635. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  636. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  637. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  638. vllm/model_executor/layers/quantization/deepgemm.py +81 -0
  639. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  640. vllm/model_executor/layers/quantization/experts_int8.py +215 -0
  641. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  642. vllm/model_executor/layers/quantization/fp8.py +1179 -0
  643. vllm/model_executor/layers/quantization/gguf.py +597 -0
  644. vllm/model_executor/layers/quantization/gptq.py +300 -0
  645. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  646. vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
  647. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  648. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  649. vllm/model_executor/layers/quantization/inc.py +61 -0
  650. vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
  651. vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
  652. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  653. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  654. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  655. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  656. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  657. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  658. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  659. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  660. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  661. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  662. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  663. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  664. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  665. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
  666. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  667. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  668. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  669. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  670. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  671. vllm/model_executor/layers/quantization/modelopt.py +1548 -0
  672. vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
  673. vllm/model_executor/layers/quantization/mxfp4.py +951 -0
  674. vllm/model_executor/layers/quantization/petit.py +306 -0
  675. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  676. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  677. vllm/model_executor/layers/quantization/quark/quark.py +431 -0
  678. vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
  679. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  680. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  681. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
  682. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  683. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  684. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  685. vllm/model_executor/layers/quantization/rtn.py +456 -0
  686. vllm/model_executor/layers/quantization/schema.py +86 -0
  687. vllm/model_executor/layers/quantization/torchao.py +214 -0
  688. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  689. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  690. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  691. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  902. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  903. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
  904. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
  905. vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
  906. vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
  907. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  908. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  909. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  910. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  911. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  912. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  913. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  914. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  915. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
  916. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  917. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  918. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  919. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  920. vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
  921. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  922. vllm/model_executor/layers/resampler.py +270 -0
  923. vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
  924. vllm/model_executor/layers/rotary_embedding/base.py +156 -0
  925. vllm/model_executor/layers/rotary_embedding/common.py +105 -0
  926. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
  927. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  928. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  929. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  930. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  931. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  932. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  933. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  934. vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
  935. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  936. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  937. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  938. vllm/model_executor/layers/sampler.py +1198 -0
  939. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  940. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  941. vllm/model_executor/layers/utils.py +196 -0
  942. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  943. vllm/model_executor/model_loader/__init__.py +138 -0
  944. vllm/model_executor/model_loader/base_loader.py +52 -0
  945. vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
  946. vllm/model_executor/model_loader/default_loader.py +278 -0
  947. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  948. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  949. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  950. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  951. vllm/model_executor/model_loader/tensorizer.py +743 -0
  952. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  953. vllm/model_executor/model_loader/tpu.py +114 -0
  954. vllm/model_executor/model_loader/utils.py +271 -0
  955. vllm/model_executor/model_loader/weight_utils.py +946 -0
  956. vllm/model_executor/models/__init__.py +30 -0
  957. vllm/model_executor/models/adapters.py +542 -0
  958. vllm/model_executor/models/aimv2.py +246 -0
  959. vllm/model_executor/models/apertus.py +582 -0
  960. vllm/model_executor/models/arcee.py +423 -0
  961. vllm/model_executor/models/arctic.py +560 -0
  962. vllm/model_executor/models/aria.py +662 -0
  963. vllm/model_executor/models/aya_vision.py +470 -0
  964. vllm/model_executor/models/baichuan.py +475 -0
  965. vllm/model_executor/models/bailing_moe.py +529 -0
  966. vllm/model_executor/models/bamba.py +582 -0
  967. vllm/model_executor/models/bart.py +1343 -0
  968. vllm/model_executor/models/bert.py +613 -0
  969. vllm/model_executor/models/bert_with_rope.py +687 -0
  970. vllm/model_executor/models/blip.py +339 -0
  971. vllm/model_executor/models/blip2.py +716 -0
  972. vllm/model_executor/models/bloom.py +374 -0
  973. vllm/model_executor/models/chameleon.py +1141 -0
  974. vllm/model_executor/models/chatglm.py +479 -0
  975. vllm/model_executor/models/clip.py +407 -0
  976. vllm/model_executor/models/cohere2_vision.py +484 -0
  977. vllm/model_executor/models/commandr.py +467 -0
  978. vllm/model_executor/models/config.py +434 -0
  979. vllm/model_executor/models/constant_size_cache.py +137 -0
  980. vllm/model_executor/models/dbrx.py +473 -0
  981. vllm/model_executor/models/deepseek.py +491 -0
  982. vllm/model_executor/models/deepseek_eagle.py +241 -0
  983. vllm/model_executor/models/deepseek_mtp.py +282 -0
  984. vllm/model_executor/models/deepseek_v2.py +1058 -0
  985. vllm/model_executor/models/deepseek_vl2.py +661 -0
  986. vllm/model_executor/models/donut.py +387 -0
  987. vllm/model_executor/models/dots1.py +547 -0
  988. vllm/model_executor/models/ernie45.py +43 -0
  989. vllm/model_executor/models/ernie45_moe.py +608 -0
  990. vllm/model_executor/models/ernie45_vl.py +1510 -0
  991. vllm/model_executor/models/ernie45_vl_moe.py +728 -0
  992. vllm/model_executor/models/ernie_mtp.py +287 -0
  993. vllm/model_executor/models/exaone.py +552 -0
  994. vllm/model_executor/models/exaone4.py +535 -0
  995. vllm/model_executor/models/fairseq2_llama.py +154 -0
  996. vllm/model_executor/models/falcon.py +511 -0
  997. vllm/model_executor/models/falcon_h1.py +739 -0
  998. vllm/model_executor/models/florence2.py +1107 -0
  999. vllm/model_executor/models/fuyu.py +401 -0
  1000. vllm/model_executor/models/gemma.py +428 -0
  1001. vllm/model_executor/models/gemma2.py +425 -0
  1002. vllm/model_executor/models/gemma3.py +542 -0
  1003. vllm/model_executor/models/gemma3_mm.py +723 -0
  1004. vllm/model_executor/models/gemma3n.py +830 -0
  1005. vllm/model_executor/models/gemma3n_mm.py +767 -0
  1006. vllm/model_executor/models/glm.py +23 -0
  1007. vllm/model_executor/models/glm4.py +305 -0
  1008. vllm/model_executor/models/glm4_1v.py +1669 -0
  1009. vllm/model_executor/models/glm4_moe.py +703 -0
  1010. vllm/model_executor/models/glm4_moe_mtp.py +306 -0
  1011. vllm/model_executor/models/glm4v.py +654 -0
  1012. vllm/model_executor/models/gpt2.py +383 -0
  1013. vllm/model_executor/models/gpt_bigcode.py +346 -0
  1014. vllm/model_executor/models/gpt_j.py +340 -0
  1015. vllm/model_executor/models/gpt_neox.py +333 -0
  1016. vllm/model_executor/models/gpt_oss.py +687 -0
  1017. vllm/model_executor/models/granite.py +498 -0
  1018. vllm/model_executor/models/granite_speech.py +799 -0
  1019. vllm/model_executor/models/granitemoe.py +541 -0
  1020. vllm/model_executor/models/granitemoehybrid.py +684 -0
  1021. vllm/model_executor/models/granitemoeshared.py +342 -0
  1022. vllm/model_executor/models/gritlm.py +262 -0
  1023. vllm/model_executor/models/grok1.py +550 -0
  1024. vllm/model_executor/models/h2ovl.py +536 -0
  1025. vllm/model_executor/models/hunyuan_v1.py +937 -0
  1026. vllm/model_executor/models/hyperclovax_vision.py +1206 -0
  1027. vllm/model_executor/models/idefics2_vision_model.py +416 -0
  1028. vllm/model_executor/models/idefics3.py +758 -0
  1029. vllm/model_executor/models/interfaces.py +854 -0
  1030. vllm/model_executor/models/interfaces_base.py +195 -0
  1031. vllm/model_executor/models/intern_vit.py +481 -0
  1032. vllm/model_executor/models/internlm2.py +453 -0
  1033. vllm/model_executor/models/internlm2_ve.py +148 -0
  1034. vllm/model_executor/models/interns1.py +832 -0
  1035. vllm/model_executor/models/interns1_vit.py +418 -0
  1036. vllm/model_executor/models/internvl.py +1423 -0
  1037. vllm/model_executor/models/jais.py +374 -0
  1038. vllm/model_executor/models/jamba.py +630 -0
  1039. vllm/model_executor/models/jina_vl.py +144 -0
  1040. vllm/model_executor/models/keye.py +1684 -0
  1041. vllm/model_executor/models/keye_vl1_5.py +601 -0
  1042. vllm/model_executor/models/kimi_vl.py +620 -0
  1043. vllm/model_executor/models/lfm2.py +558 -0
  1044. vllm/model_executor/models/llama.py +671 -0
  1045. vllm/model_executor/models/llama4.py +732 -0
  1046. vllm/model_executor/models/llama4_eagle.py +241 -0
  1047. vllm/model_executor/models/llama_eagle.py +171 -0
  1048. vllm/model_executor/models/llama_eagle3.py +292 -0
  1049. vllm/model_executor/models/llava.py +872 -0
  1050. vllm/model_executor/models/llava_next.py +572 -0
  1051. vllm/model_executor/models/llava_next_video.py +479 -0
  1052. vllm/model_executor/models/llava_onevision.py +945 -0
  1053. vllm/model_executor/models/mamba.py +310 -0
  1054. vllm/model_executor/models/mamba2.py +346 -0
  1055. vllm/model_executor/models/mamba_cache.py +83 -0
  1056. vllm/model_executor/models/medusa.py +219 -0
  1057. vllm/model_executor/models/midashenglm.py +788 -0
  1058. vllm/model_executor/models/mimo.py +191 -0
  1059. vllm/model_executor/models/mimo_mtp.py +273 -0
  1060. vllm/model_executor/models/minicpm.py +593 -0
  1061. vllm/model_executor/models/minicpm3.py +230 -0
  1062. vllm/model_executor/models/minicpm_eagle.py +391 -0
  1063. vllm/model_executor/models/minicpmo.py +804 -0
  1064. vllm/model_executor/models/minicpmv.py +1786 -0
  1065. vllm/model_executor/models/minimax_cache.py +36 -0
  1066. vllm/model_executor/models/minimax_text_01.py +1027 -0
  1067. vllm/model_executor/models/minimax_vl_01.py +431 -0
  1068. vllm/model_executor/models/mistral3.py +628 -0
  1069. vllm/model_executor/models/mixtral.py +494 -0
  1070. vllm/model_executor/models/mllama.py +1697 -0
  1071. vllm/model_executor/models/mllama4.py +1079 -0
  1072. vllm/model_executor/models/mlp_speculator.py +206 -0
  1073. vllm/model_executor/models/modernbert.py +374 -0
  1074. vllm/model_executor/models/module_mapping.py +72 -0
  1075. vllm/model_executor/models/molmo.py +1569 -0
  1076. vllm/model_executor/models/moonvit.py +663 -0
  1077. vllm/model_executor/models/motif.py +345 -0
  1078. vllm/model_executor/models/mpt.py +332 -0
  1079. vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
  1080. vllm/model_executor/models/nemotron.py +509 -0
  1081. vllm/model_executor/models/nemotron_h.py +633 -0
  1082. vllm/model_executor/models/nemotron_nas.py +484 -0
  1083. vllm/model_executor/models/nemotron_vl.py +655 -0
  1084. vllm/model_executor/models/nvlm_d.py +203 -0
  1085. vllm/model_executor/models/olmo.py +406 -0
  1086. vllm/model_executor/models/olmo2.py +428 -0
  1087. vllm/model_executor/models/olmoe.py +485 -0
  1088. vllm/model_executor/models/opt.py +413 -0
  1089. vllm/model_executor/models/orion.py +350 -0
  1090. vllm/model_executor/models/ovis.py +572 -0
  1091. vllm/model_executor/models/ovis2_5.py +644 -0
  1092. vllm/model_executor/models/paligemma.py +414 -0
  1093. vllm/model_executor/models/persimmon.py +345 -0
  1094. vllm/model_executor/models/phi.py +357 -0
  1095. vllm/model_executor/models/phi3.py +19 -0
  1096. vllm/model_executor/models/phi3v.py +701 -0
  1097. vllm/model_executor/models/phi4_multimodal.py +1478 -0
  1098. vllm/model_executor/models/phi4flash.py +737 -0
  1099. vllm/model_executor/models/phi4mm.py +1281 -0
  1100. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1101. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1102. vllm/model_executor/models/phimoe.py +681 -0
  1103. vllm/model_executor/models/pixtral.py +1348 -0
  1104. vllm/model_executor/models/plamo2.py +1126 -0
  1105. vllm/model_executor/models/qwen.py +363 -0
  1106. vllm/model_executor/models/qwen2.py +526 -0
  1107. vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
  1108. vllm/model_executor/models/qwen2_5_vl.py +1256 -0
  1109. vllm/model_executor/models/qwen2_audio.py +492 -0
  1110. vllm/model_executor/models/qwen2_moe.py +558 -0
  1111. vllm/model_executor/models/qwen2_rm.py +122 -0
  1112. vllm/model_executor/models/qwen2_vl.py +1512 -0
  1113. vllm/model_executor/models/qwen3.py +344 -0
  1114. vllm/model_executor/models/qwen3_moe.py +704 -0
  1115. vllm/model_executor/models/qwen3_next.py +1298 -0
  1116. vllm/model_executor/models/qwen3_next_mtp.py +285 -0
  1117. vllm/model_executor/models/qwen_vl.py +795 -0
  1118. vllm/model_executor/models/registry.py +891 -0
  1119. vllm/model_executor/models/roberta.py +252 -0
  1120. vllm/model_executor/models/rvl.py +103 -0
  1121. vllm/model_executor/models/seed_oss.py +488 -0
  1122. vllm/model_executor/models/siglip.py +524 -0
  1123. vllm/model_executor/models/siglip2navit.py +688 -0
  1124. vllm/model_executor/models/skyworkr1v.py +914 -0
  1125. vllm/model_executor/models/smolvlm.py +44 -0
  1126. vllm/model_executor/models/solar.py +506 -0
  1127. vllm/model_executor/models/stablelm.py +344 -0
  1128. vllm/model_executor/models/starcoder2.py +357 -0
  1129. vllm/model_executor/models/step3_text.py +521 -0
  1130. vllm/model_executor/models/step3_vl.py +1091 -0
  1131. vllm/model_executor/models/swin.py +475 -0
  1132. vllm/model_executor/models/tarsier.py +649 -0
  1133. vllm/model_executor/models/telechat2.py +151 -0
  1134. vllm/model_executor/models/teleflm.py +79 -0
  1135. vllm/model_executor/models/terratorch.py +294 -0
  1136. vllm/model_executor/models/transformers.py +883 -0
  1137. vllm/model_executor/models/ultravox.py +667 -0
  1138. vllm/model_executor/models/utils.py +770 -0
  1139. vllm/model_executor/models/vision.py +125 -0
  1140. vllm/model_executor/models/voxtral.py +789 -0
  1141. vllm/model_executor/models/whisper.py +966 -0
  1142. vllm/model_executor/models/zamba2.py +1056 -0
  1143. vllm/model_executor/parameter.py +599 -0
  1144. vllm/model_executor/sampling_metadata.py +597 -0
  1145. vllm/model_executor/utils.py +97 -0
  1146. vllm/model_executor/warmup/__init__.py +0 -0
  1147. vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
  1148. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1149. vllm/multimodal/__init__.py +35 -0
  1150. vllm/multimodal/audio.py +116 -0
  1151. vllm/multimodal/base.py +219 -0
  1152. vllm/multimodal/cache.py +507 -0
  1153. vllm/multimodal/hasher.py +110 -0
  1154. vllm/multimodal/image.py +130 -0
  1155. vllm/multimodal/inputs.py +979 -0
  1156. vllm/multimodal/parse.py +496 -0
  1157. vllm/multimodal/processing.py +1921 -0
  1158. vllm/multimodal/profiling.py +313 -0
  1159. vllm/multimodal/registry.py +375 -0
  1160. vllm/multimodal/utils.py +754 -0
  1161. vllm/multimodal/video.py +312 -0
  1162. vllm/outputs.py +517 -0
  1163. vllm/platforms/__init__.py +263 -0
  1164. vllm/platforms/cpu.py +353 -0
  1165. vllm/platforms/cuda.py +731 -0
  1166. vllm/platforms/interface.py +599 -0
  1167. vllm/platforms/rocm.py +504 -0
  1168. vllm/platforms/tpu.py +236 -0
  1169. vllm/platforms/xpu.py +243 -0
  1170. vllm/plugins/__init__.py +72 -0
  1171. vllm/plugins/io_processors/__init__.py +68 -0
  1172. vllm/plugins/io_processors/interface.py +67 -0
  1173. vllm/plugins/lora_resolvers/README.md +16 -0
  1174. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1175. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1176. vllm/pooling_params.py +183 -0
  1177. vllm/profiler/__init__.py +0 -0
  1178. vllm/profiler/layerwise_profile.py +375 -0
  1179. vllm/profiler/utils.py +148 -0
  1180. vllm/py.typed +2 -0
  1181. vllm/ray/__init__.py +0 -0
  1182. vllm/ray/lazy_utils.py +22 -0
  1183. vllm/ray/ray_env.py +72 -0
  1184. vllm/reasoning/__init__.py +25 -0
  1185. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1186. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1187. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1188. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1189. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1190. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1191. vllm/reasoning/mistral_reasoning_parser.py +47 -0
  1192. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1193. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1194. vllm/sampling_params.py +577 -0
  1195. vllm/scalar_type.py +349 -0
  1196. vllm/scripts.py +15 -0
  1197. vllm/sequence.py +1465 -0
  1198. vllm/tasks.py +11 -0
  1199. vllm/test_utils.py +130 -0
  1200. vllm/third_party/__init__.py +0 -0
  1201. vllm/third_party/pynvml.py +6140 -0
  1202. vllm/tracing.py +136 -0
  1203. vllm/transformers_utils/__init__.py +24 -0
  1204. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1205. vllm/transformers_utils/chat_templates/registry.py +71 -0
  1206. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1207. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1208. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1209. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1210. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1211. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1212. vllm/transformers_utils/config.py +1043 -0
  1213. vllm/transformers_utils/config_parser_base.py +20 -0
  1214. vllm/transformers_utils/configs/__init__.py +55 -0
  1215. vllm/transformers_utils/configs/arctic.py +207 -0
  1216. vllm/transformers_utils/configs/chatglm.py +72 -0
  1217. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1218. vllm/transformers_utils/configs/eagle.py +84 -0
  1219. vllm/transformers_utils/configs/falcon.py +90 -0
  1220. vllm/transformers_utils/configs/jais.py +238 -0
  1221. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1222. vllm/transformers_utils/configs/medusa.py +63 -0
  1223. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1224. vllm/transformers_utils/configs/mistral.py +165 -0
  1225. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1226. vllm/transformers_utils/configs/moonvit.py +33 -0
  1227. vllm/transformers_utils/configs/nemotron.py +205 -0
  1228. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1229. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1230. vllm/transformers_utils/configs/ovis.py +176 -0
  1231. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1232. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1233. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1234. vllm/transformers_utils/configs/speculators/base.py +91 -0
  1235. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1236. vllm/transformers_utils/configs/ultravox.py +120 -0
  1237. vllm/transformers_utils/detokenizer.py +169 -0
  1238. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1239. vllm/transformers_utils/dynamic_module.py +60 -0
  1240. vllm/transformers_utils/processor.py +245 -0
  1241. vllm/transformers_utils/processors/__init__.py +16 -0
  1242. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1243. vllm/transformers_utils/processors/ovis.py +420 -0
  1244. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1245. vllm/transformers_utils/runai_utils.py +99 -0
  1246. vllm/transformers_utils/s3_utils.py +90 -0
  1247. vllm/transformers_utils/tokenizer.py +293 -0
  1248. vllm/transformers_utils/tokenizer_base.py +149 -0
  1249. vllm/transformers_utils/tokenizer_group.py +132 -0
  1250. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1251. vllm/transformers_utils/tokenizers/mistral.py +520 -0
  1252. vllm/transformers_utils/utils.py +99 -0
  1253. vllm/triton_utils/__init__.py +16 -0
  1254. vllm/triton_utils/importing.py +95 -0
  1255. vllm/usage/__init__.py +0 -0
  1256. vllm/usage/usage_lib.py +259 -0
  1257. vllm/utils/__init__.py +3438 -0
  1258. vllm/utils/deep_gemm.py +212 -0
  1259. vllm/utils/flashinfer.py +372 -0
  1260. vllm/utils/jsontree.py +90 -0
  1261. vllm/utils/tensor_schema.py +236 -0
  1262. vllm/v1/__init__.py +0 -0
  1263. vllm/v1/attention/__init__.py +0 -0
  1264. vllm/v1/attention/backends/__init__.py +0 -0
  1265. vllm/v1/attention/backends/cpu_attn.py +922 -0
  1266. vllm/v1/attention/backends/flash_attn.py +800 -0
  1267. vllm/v1/attention/backends/flashinfer.py +1128 -0
  1268. vllm/v1/attention/backends/flex_attention.py +796 -0
  1269. vllm/v1/attention/backends/gdn_attn.py +320 -0
  1270. vllm/v1/attention/backends/linear_attn.py +68 -0
  1271. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1272. vllm/v1/attention/backends/mamba2_attn.py +224 -0
  1273. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1274. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1275. vllm/v1/attention/backends/mla/common.py +1608 -0
  1276. vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
  1277. vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
  1278. vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
  1279. vllm/v1/attention/backends/mla/flashmla.py +213 -0
  1280. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1281. vllm/v1/attention/backends/mla/triton_mla.py +175 -0
  1282. vllm/v1/attention/backends/pallas.py +413 -0
  1283. vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
  1284. vllm/v1/attention/backends/short_conv_attn.py +82 -0
  1285. vllm/v1/attention/backends/tree_attn.py +450 -0
  1286. vllm/v1/attention/backends/triton_attn.py +430 -0
  1287. vllm/v1/attention/backends/utils.py +834 -0
  1288. vllm/v1/attention/backends/xformers.py +437 -0
  1289. vllm/v1/core/__init__.py +0 -0
  1290. vllm/v1/core/block_pool.py +330 -0
  1291. vllm/v1/core/encoder_cache_manager.py +333 -0
  1292. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1293. vllm/v1/core/kv_cache_manager.py +398 -0
  1294. vllm/v1/core/kv_cache_utils.py +1169 -0
  1295. vllm/v1/core/sched/__init__.py +0 -0
  1296. vllm/v1/core/sched/async_scheduler.py +47 -0
  1297. vllm/v1/core/sched/interface.py +158 -0
  1298. vllm/v1/core/sched/output.py +162 -0
  1299. vllm/v1/core/sched/request_queue.py +224 -0
  1300. vllm/v1/core/sched/scheduler.py +1287 -0
  1301. vllm/v1/core/sched/utils.py +69 -0
  1302. vllm/v1/core/single_type_kv_cache_manager.py +670 -0
  1303. vllm/v1/cudagraph_dispatcher.py +121 -0
  1304. vllm/v1/engine/__init__.py +202 -0
  1305. vllm/v1/engine/async_llm.py +757 -0
  1306. vllm/v1/engine/coordinator.py +357 -0
  1307. vllm/v1/engine/core.py +1245 -0
  1308. vllm/v1/engine/core_client.py +1333 -0
  1309. vllm/v1/engine/detokenizer.py +300 -0
  1310. vllm/v1/engine/exceptions.py +17 -0
  1311. vllm/v1/engine/llm_engine.py +332 -0
  1312. vllm/v1/engine/logprobs.py +201 -0
  1313. vllm/v1/engine/output_processor.py +558 -0
  1314. vllm/v1/engine/parallel_sampling.py +133 -0
  1315. vllm/v1/engine/processor.py +524 -0
  1316. vllm/v1/engine/utils.py +857 -0
  1317. vllm/v1/executor/__init__.py +0 -0
  1318. vllm/v1/executor/abstract.py +126 -0
  1319. vllm/v1/executor/multiproc_executor.py +683 -0
  1320. vllm/v1/executor/ray_distributed_executor.py +109 -0
  1321. vllm/v1/kv_cache_interface.py +275 -0
  1322. vllm/v1/metrics/__init__.py +0 -0
  1323. vllm/v1/metrics/loggers.py +717 -0
  1324. vllm/v1/metrics/prometheus.py +82 -0
  1325. vllm/v1/metrics/ray_wrappers.py +133 -0
  1326. vllm/v1/metrics/reader.py +246 -0
  1327. vllm/v1/metrics/stats.py +248 -0
  1328. vllm/v1/outputs.py +147 -0
  1329. vllm/v1/pool/__init__.py +0 -0
  1330. vllm/v1/pool/metadata.py +77 -0
  1331. vllm/v1/request.py +237 -0
  1332. vllm/v1/sample/__init__.py +0 -0
  1333. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1334. vllm/v1/sample/logits_processor/builtin.py +273 -0
  1335. vllm/v1/sample/logits_processor/interface.py +97 -0
  1336. vllm/v1/sample/logits_processor/state.py +161 -0
  1337. vllm/v1/sample/metadata.py +43 -0
  1338. vllm/v1/sample/ops/__init__.py +0 -0
  1339. vllm/v1/sample/ops/bad_words.py +39 -0
  1340. vllm/v1/sample/ops/logprobs.py +26 -0
  1341. vllm/v1/sample/ops/penalties.py +43 -0
  1342. vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
  1343. vllm/v1/sample/rejection_sampler.py +623 -0
  1344. vllm/v1/sample/sampler.py +281 -0
  1345. vllm/v1/sample/tpu/__init__.py +0 -0
  1346. vllm/v1/sample/tpu/metadata.py +124 -0
  1347. vllm/v1/sample/tpu/sampler.py +213 -0
  1348. vllm/v1/serial_utils.py +395 -0
  1349. vllm/v1/spec_decode/__init__.py +0 -0
  1350. vllm/v1/spec_decode/eagle.py +740 -0
  1351. vllm/v1/spec_decode/medusa.py +66 -0
  1352. vllm/v1/spec_decode/metadata.py +62 -0
  1353. vllm/v1/spec_decode/metrics.py +191 -0
  1354. vllm/v1/spec_decode/ngram_proposer.py +157 -0
  1355. vllm/v1/spec_decode/utils.py +14 -0
  1356. vllm/v1/structured_output/__init__.py +297 -0
  1357. vllm/v1/structured_output/backend_guidance.py +245 -0
  1358. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1359. vllm/v1/structured_output/backend_outlines.py +320 -0
  1360. vllm/v1/structured_output/backend_types.py +134 -0
  1361. vllm/v1/structured_output/backend_xgrammar.py +323 -0
  1362. vllm/v1/structured_output/request.py +86 -0
  1363. vllm/v1/structured_output/utils.py +373 -0
  1364. vllm/v1/utils.py +382 -0
  1365. vllm/v1/worker/__init__.py +0 -0
  1366. vllm/v1/worker/block_table.py +221 -0
  1367. vllm/v1/worker/cpu_model_runner.py +163 -0
  1368. vllm/v1/worker/cpu_worker.py +183 -0
  1369. vllm/v1/worker/gpu_input_batch.py +821 -0
  1370. vllm/v1/worker/gpu_model_runner.py +3743 -0
  1371. vllm/v1/worker/gpu_worker.py +697 -0
  1372. vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
  1373. vllm/v1/worker/lora_model_runner_mixin.py +192 -0
  1374. vllm/v1/worker/tpu_input_batch.py +585 -0
  1375. vllm/v1/worker/tpu_model_runner.py +1947 -0
  1376. vllm/v1/worker/tpu_worker.py +340 -0
  1377. vllm/v1/worker/utils.py +290 -0
  1378. vllm/v1/worker/worker_base.py +65 -0
  1379. vllm/v1/worker/xpu_model_runner.py +53 -0
  1380. vllm/v1/worker/xpu_worker.py +179 -0
  1381. vllm/version.py +41 -0
  1382. vllm/vllm_flash_attn/.gitkeep +0 -0
  1383. vllm/worker/__init__.py +0 -0
  1384. vllm/worker/cache_engine.py +145 -0
  1385. vllm/worker/enc_dec_model_runner.py +553 -0
  1386. vllm/worker/model_runner.py +2016 -0
  1387. vllm/worker/model_runner_base.py +307 -0
  1388. vllm/worker/utils.py +49 -0
  1389. vllm/worker/worker.py +670 -0
  1390. vllm/worker/worker_base.py +651 -0
  1391. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
  1392. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
  1393. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
  1394. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
  1395. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1857 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ # yapf: disable
5
+ import argparse
6
+ import copy
7
+ import dataclasses
8
+ import functools
9
+ import json
10
+ import sys
11
+ from dataclasses import MISSING, dataclass, fields, is_dataclass
12
+ from itertools import permutations
13
+ from typing import (TYPE_CHECKING, Annotated, Any, Callable, Dict, List,
14
+ Literal, Optional, Type, TypeVar, Union, cast, get_args,
15
+ get_origin)
16
+
17
+ import huggingface_hub
18
+ import regex as re
19
+ import torch
20
+ from pydantic import TypeAdapter, ValidationError
21
+ from typing_extensions import TypeIs, deprecated
22
+
23
+ import vllm.envs as envs
24
+ from vllm.config import (BlockSize, CacheConfig, CacheDType, CompilationConfig,
25
+ ConfigType, ConvertOption, DecodingConfig,
26
+ DetailedTraceModules, Device, DeviceConfig,
27
+ DistributedExecutorBackend, EPLBConfig,
28
+ GuidedDecodingBackend, HfOverrides, KVEventsConfig,
29
+ KVTransferConfig, LoadConfig, LogprobsMode,
30
+ LoRAConfig, MambaDType, MMEncoderTPMode, ModelConfig,
31
+ ModelDType, ModelImpl, MultiModalConfig,
32
+ ObservabilityConfig, ParallelConfig, PoolerConfig,
33
+ PrefixCachingHashAlgo, RunnerOption, SchedulerConfig,
34
+ SchedulerPolicy, SpeculativeConfig, TaskOption,
35
+ TokenizerMode, VllmConfig, get_attr_docs, get_field)
36
+ from vllm.logger import init_logger
37
+ from vllm.platforms import CpuArchEnum, current_platform
38
+ from vllm.plugins import load_general_plugins
39
+ from vllm.ray.lazy_utils import is_ray_initialized
40
+ from vllm.reasoning import ReasoningParserManager
41
+ from vllm.test_utils import MODEL_WEIGHTS_S3_BUCKET, MODELS_ON_S3
42
+ from vllm.transformers_utils.config import get_model_path, is_interleaved
43
+ from vllm.transformers_utils.utils import check_gguf_file
44
+ from vllm.utils import (STR_DUAL_CHUNK_FLASH_ATTN_VAL, FlexibleArgumentParser,
45
+ GiB_bytes, get_ip, is_in_ray_actor)
46
+ from vllm.v1.sample.logits_processor import LogitsProcessor
47
+
48
+ # yapf: enable
49
+
50
+ if TYPE_CHECKING:
51
+ from vllm.executor.executor_base import ExecutorBase
52
+ from vllm.model_executor.layers.quantization import QuantizationMethods
53
+ from vllm.model_executor.model_loader import LoadFormats
54
+ from vllm.usage.usage_lib import UsageContext
55
+ else:
56
+ ExecutorBase = Any
57
+ QuantizationMethods = Any
58
+ LoadFormats = Any
59
+ UsageContext = Any
60
+
61
+ logger = init_logger(__name__)
62
+
63
+ # object is used to allow for special typing forms
64
+ T = TypeVar("T")
65
+ TypeHint = Union[type[Any], object]
66
+ TypeHintT = Union[type[T], object]
67
+
68
+
69
+ def parse_type(return_type: Callable[[str], T]) -> Callable[[str], T]:
70
+
71
+ def _parse_type(val: str) -> T:
72
+ try:
73
+ return return_type(val)
74
+ except ValueError as e:
75
+ raise argparse.ArgumentTypeError(
76
+ f"Value {val} cannot be converted to {return_type}.") from e
77
+
78
+ return _parse_type
79
+
80
+
81
+ def optional_type(
82
+ return_type: Callable[[str], T]) -> Callable[[str], Optional[T]]:
83
+
84
+ def _optional_type(val: str) -> Optional[T]:
85
+ if val == "" or val == "None":
86
+ return None
87
+ return parse_type(return_type)(val)
88
+
89
+ return _optional_type
90
+
91
+
92
+ def union_dict_and_str(val: str) -> Optional[Union[str, dict[str, str]]]:
93
+ if not re.match(r"(?s)^\s*{.*}\s*$", val):
94
+ return str(val)
95
+ return optional_type(json.loads)(val)
96
+
97
+
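# Illustrative sketch, not part of the packaged file: the helpers above are
# intended to be passed as argparse `type=` callables. The literal values
# below are assumptions chosen only to show the behaviour.
_maybe_int = optional_type(int)
assert _maybe_int("None") is None                        # "None"/"" -> None
assert _maybe_int("8080") == 8080                        # normal conversion
assert union_dict_and_str('{"a": "b"}') == {"a": "b"}    # JSON object -> dict
assert union_dict_and_str("plain-text") == "plain-text"  # anything else -> str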
98
+ def is_type(type_hint: TypeHint, type: TypeHintT) -> TypeIs[TypeHintT]:
99
+ """Check if the type hint is a specific type."""
100
+ return type_hint is type or get_origin(type_hint) is type
101
+
102
+
103
+ def contains_type(type_hints: set[TypeHint], type: TypeHintT) -> bool:
104
+ """Check if the type hints contain a specific type."""
105
+ return any(is_type(type_hint, type) for type_hint in type_hints)
106
+
107
+
108
+ def get_type(type_hints: set[TypeHint], type: TypeHintT) -> TypeHintT:
109
+ """Get the specific type from the type hints."""
110
+ return next((th for th in type_hints if is_type(th, type)), None)
111
+
112
+
113
+ def literal_to_kwargs(type_hints: set[TypeHint]) -> dict[str, Any]:
114
+ """Get the `type` and `choices` from a `Literal` type hint in `type_hints`.
115
+
116
+ If `type_hints` also contains `str`, we use `metavar` instead of `choices`.
117
+ """
118
+ type_hint = get_type(type_hints, Literal)
119
+ options = get_args(type_hint)
120
+ option_type = type(options[0])
121
+ if not all(isinstance(option, option_type) for option in options):
122
+ raise ValueError(
123
+ "All options must be of the same type. "
124
+ f"Got {options} with types {[type(c) for c in options]}")
125
+ kwarg = "metavar" if contains_type(type_hints, str) else "choices"
126
+ return {"type": option_type, kwarg: sorted(options)}
127
+
128
+
129
+ def is_not_builtin(type_hint: TypeHint) -> bool:
130
+ """Check if the class is not a built-in type."""
131
+ return type_hint.__module__ != "builtins"
132
+
133
+
134
+ def get_type_hints(type_hint: TypeHint) -> set[TypeHint]:
135
+ """Extract type hints from Annotated or Union type hints."""
136
+ type_hints: set[TypeHint] = set()
137
+ origin = get_origin(type_hint)
138
+ args = get_args(type_hint)
139
+
140
+ if origin is Annotated:
141
+ type_hints.update(get_type_hints(args[0]))
142
+ elif origin is Union:
143
+ for arg in args:
144
+ type_hints.update(get_type_hints(arg))
145
+ else:
146
+ type_hints.add(type_hint)
147
+
148
+ return type_hints
149
+
150
+
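# Illustrative sketch, not part of the packaged file: get_type_hints() above
# flattens Annotated/Union hints, and literal_to_kwargs() turns Literal
# choices into argparse keyword arguments. The example hint is an assumption.
_example_hint = Optional[Literal["auto", "float16", "bfloat16"]]
_flattened = get_type_hints(_example_hint)  # {Literal[...], NoneType}
assert type(None) in _flattened
assert literal_to_kwargs(_flattened) == {
    "type": str,
    "choices": ["auto", "bfloat16", "float16"],  # sorted alphabetically
}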
151
+ def is_online_quantization(quantization: Any) -> bool:
152
+ return quantization in ["inc"]
153
+
154
+
155
+ NEEDS_HELP = (
156
+ "--help" in (argv := sys.argv) # vllm SUBCOMMAND --help
157
+ or (argv0 := argv[0]).endswith("mkdocs") # mkdocs SUBCOMMAND
158
+ or argv0.endswith("mkdocs/__main__.py") # python -m mkdocs SUBCOMMAND
159
+ )
160
+
161
+
162
+ @functools.lru_cache(maxsize=30)
163
+ def _compute_kwargs(cls: ConfigType) -> dict[str, Any]:
164
+ # Save time only getting attr docs if we're generating help text
165
+ cls_docs = get_attr_docs(cls) if NEEDS_HELP else {}
166
+ kwargs = {}
167
+ for field in fields(cls):
168
+ # Get the set of possible types for the field
169
+ type_hints: set[TypeHint] = get_type_hints(field.type)
170
+
171
+ # If the field is a dataclass, we can use the model_validate_json
172
+ generator = (th for th in type_hints if is_dataclass(th))
173
+ dataclass_cls = next(generator, None)
174
+
175
+ # Get the default value of the field
176
+ if field.default is not MISSING:
177
+ default = field.default
178
+ elif field.default_factory is not MISSING:
179
+ default = field.default_factory()
180
+
181
+ # Get the help text for the field
182
+ name = field.name
183
+ help = cls_docs.get(name, "").strip()
184
+ # Escape % for argparse
185
+ help = help.replace("%", "%%")
186
+
187
+ # Initialise the kwargs dictionary for the field
188
+ kwargs[name] = {"default": default, "help": help}
189
+
190
+ # Set other kwargs based on the type hints
191
+ json_tip = ("Should either be a valid JSON string or JSON keys passed "
192
+ "individually.")
193
+ if dataclass_cls is not None:
194
+
195
+ def parse_dataclass(val: str, cls=dataclass_cls) -> Any:
196
+ try:
197
+ return TypeAdapter(cls).validate_json(val)
198
+ except ValidationError as e:
199
+ raise argparse.ArgumentTypeError(repr(e)) from e
200
+
201
+ kwargs[name]["type"] = parse_dataclass
202
+ kwargs[name]["help"] += f"\n\n{json_tip}"
203
+ elif contains_type(type_hints, bool):
204
+ # Creates --no-<name> and --<name> flags
205
+ kwargs[name]["action"] = argparse.BooleanOptionalAction
206
+ elif contains_type(type_hints, Literal):
207
+ kwargs[name].update(literal_to_kwargs(type_hints))
208
+ elif contains_type(type_hints, tuple):
209
+ type_hint = get_type(type_hints, tuple)
210
+ types = get_args(type_hint)
211
+ tuple_type = types[0]
212
+ assert all(t is tuple_type for t in types if t is not Ellipsis), (
213
+ "All non-Ellipsis tuple elements must be of the same "
214
+ f"type. Got {types}.")
215
+ kwargs[name]["type"] = tuple_type
216
+ kwargs[name]["nargs"] = "+" if Ellipsis in types else len(types)
217
+ elif contains_type(type_hints, list):
218
+ type_hint = get_type(type_hints, list)
219
+ types = get_args(type_hint)
220
+ list_type = types[0]
221
+ if get_origin(list_type) is Union:
222
+ msg = "List type must contain str if it is a Union."
223
+ assert str in get_args(list_type), msg
224
+ list_type = str
225
+ kwargs[name]["type"] = list_type
226
+ kwargs[name]["nargs"] = "+"
227
+ elif contains_type(type_hints, int):
228
+ kwargs[name]["type"] = int
229
+ # Special case for large integers
230
+ human_readable_ints = {
231
+ "max_model_len",
232
+ "max_num_batched_tokens",
233
+ "kv_cache_memory_bytes",
234
+ }
235
+ if name in human_readable_ints:
236
+ kwargs[name]["type"] = human_readable_int
237
+ kwargs[name]["help"] += f"\n\n{human_readable_int.__doc__}"
238
+ elif contains_type(type_hints, float):
239
+ kwargs[name]["type"] = float
240
+ elif (contains_type(type_hints, dict)
241
+ and (contains_type(type_hints, str)
242
+ or any(is_not_builtin(th) for th in type_hints))):
243
+ kwargs[name]["type"] = union_dict_and_str
244
+ elif contains_type(type_hints, dict):
245
+ kwargs[name]["type"] = parse_type(json.loads)
246
+ kwargs[name]["help"] += f"\n\n{json_tip}"
247
+ elif (contains_type(type_hints, str)
248
+ or any(is_not_builtin(th) for th in type_hints)):
249
+ kwargs[name]["type"] = str
250
+ else:
251
+ raise ValueError(
252
+ f"Unsupported type {type_hints} for argument {name}.")
253
+
254
+ # If the type hint was a sequence of literals, use the helper function
255
+ # to update the type and choices
256
+ if get_origin(kwargs[name].get("type")) is Literal:
257
+ kwargs[name].update(literal_to_kwargs({kwargs[name]["type"]}))
258
+
259
+ # If None is in type_hints, make the argument optional.
260
+ # But not if it's a bool, argparse will handle this better.
261
+ if type(None) in type_hints and not contains_type(type_hints, bool):
262
+ kwargs[name]["type"] = optional_type(kwargs[name]["type"])
263
+ if kwargs[name].get("choices"):
264
+ kwargs[name]["choices"].append("None")
265
+ return kwargs
266
+
267
+
268
+ def get_kwargs(cls: ConfigType) -> dict[str, Any]:
269
+ """Return argparse kwargs for the given Config dataclass.
270
+
271
+ If `--help` or `mkdocs` are not present in the command line command, the
272
+ attribute documentation will not be included in the help output.
273
+
274
+ The heavy computation is cached via functools.lru_cache, and a deep copy
275
+ is returned so callers can mutate the dictionary without affecting the
276
+ cached version.
277
+ """
278
+ return copy.deepcopy(_compute_kwargs(cls))
279
+
280
+
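# Illustrative sketch, not part of the packaged file: how the cached kwargs
# returned by get_kwargs() are typically spliced into argparse. _DemoConfig is
# a hypothetical config class used only for this example.
@dataclass
class _DemoConfig:
    """Hypothetical config used only for the sketch below."""
    workers: int = 1
    backend: Literal["mp", "ray"] = "mp"

_demo_kwargs = get_kwargs(_DemoConfig)
_demo_parser = argparse.ArgumentParser()
_demo_parser.add_argument("--workers", **_demo_kwargs["workers"])
_demo_parser.add_argument("--backend", **_demo_kwargs["backend"])
_demo_args = _demo_parser.parse_args(["--workers", "4", "--backend", "ray"])
assert _demo_args.workers == 4 and _demo_args.backend == "ray"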
281
+ @dataclass
282
+ class EngineArgs:
283
+ """Arguments for vLLM engine."""
284
+ model: str = ModelConfig.model
285
+ served_model_name: Optional[Union[
286
+ str, List[str]]] = ModelConfig.served_model_name
287
+ tokenizer: Optional[str] = ModelConfig.tokenizer
288
+ hf_config_path: Optional[str] = ModelConfig.hf_config_path
289
+ runner: RunnerOption = ModelConfig.runner
290
+ convert: ConvertOption = ModelConfig.convert
291
+ task: Optional[TaskOption] = ModelConfig.task
292
+ skip_tokenizer_init: bool = ModelConfig.skip_tokenizer_init
293
+ enable_prompt_embeds: bool = ModelConfig.enable_prompt_embeds
294
+ tokenizer_mode: TokenizerMode = ModelConfig.tokenizer_mode
295
+ trust_remote_code: bool = ModelConfig.trust_remote_code
296
+ allowed_local_media_path: str = ModelConfig.allowed_local_media_path
297
+ download_dir: Optional[str] = LoadConfig.download_dir
298
+ safetensors_load_strategy: str = LoadConfig.safetensors_load_strategy
299
+ load_format: Union[str, LoadFormats] = LoadConfig.load_format
300
+ config_format: str = ModelConfig.config_format
301
+ dtype: ModelDType = ModelConfig.dtype
302
+ kv_cache_dtype: CacheDType = CacheConfig.cache_dtype
303
+ seed: Optional[int] = ModelConfig.seed
304
+ max_model_len: Optional[int] = ModelConfig.max_model_len
305
+ cuda_graph_sizes: list[int] = get_field(SchedulerConfig,
306
+ "cuda_graph_sizes")
307
+ # Note: Specifying a custom executor backend by passing a class
308
+ # is intended for expert use only. The API may change without
309
+ # notice.
310
+ distributed_executor_backend: Optional[Union[
311
+ str, DistributedExecutorBackend,
312
+ Type[ExecutorBase]]] = ParallelConfig.distributed_executor_backend
313
+ # number of P/D disaggregation (or other disaggregation) workers
314
+ pipeline_parallel_size: int = ParallelConfig.pipeline_parallel_size
315
+ tensor_parallel_size: int = ParallelConfig.tensor_parallel_size
316
+ decode_context_parallel_size: int = \
317
+ ParallelConfig.decode_context_parallel_size
318
+ data_parallel_size: int = ParallelConfig.data_parallel_size
319
+ data_parallel_rank: Optional[int] = None
320
+ data_parallel_start_rank: Optional[int] = None
321
+ data_parallel_size_local: Optional[int] = None
322
+ data_parallel_address: Optional[str] = None
323
+ data_parallel_rpc_port: Optional[int] = None
324
+ data_parallel_hybrid_lb: bool = False
325
+ data_parallel_backend: str = ParallelConfig.data_parallel_backend
326
+ enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel
327
+ eplb_config: EPLBConfig = get_field(ParallelConfig, "eplb_config")
328
+ enable_eplb: bool = ParallelConfig.enable_eplb
329
+ num_redundant_experts: int = EPLBConfig.num_redundant_experts
330
+ eplb_window_size: int = EPLBConfig.window_size
331
+ eplb_step_interval: int = EPLBConfig.step_interval
332
+ eplb_log_balancedness: bool = EPLBConfig.log_balancedness
333
+ max_parallel_loading_workers: Optional[
334
+ int] = ParallelConfig.max_parallel_loading_workers
335
+ block_size: Optional[BlockSize] = CacheConfig.block_size
336
+ enable_prefix_caching: Optional[bool] = CacheConfig.enable_prefix_caching
337
+ prefix_caching_hash_algo: PrefixCachingHashAlgo = \
338
+ CacheConfig.prefix_caching_hash_algo
339
+ disable_sliding_window: bool = ModelConfig.disable_sliding_window
340
+ disable_cascade_attn: bool = ModelConfig.disable_cascade_attn
341
+ swap_space: float = CacheConfig.swap_space
342
+ cpu_offload_gb: float = CacheConfig.cpu_offload_gb
343
+ gpu_memory_utilization: float = CacheConfig.gpu_memory_utilization
344
+ kv_cache_memory_bytes: Optional[int] = CacheConfig.kv_cache_memory_bytes
345
+ max_num_batched_tokens: Optional[
346
+ int] = SchedulerConfig.max_num_batched_tokens
347
+ max_num_partial_prefills: int = SchedulerConfig.max_num_partial_prefills
348
+ max_long_partial_prefills: int = SchedulerConfig.max_long_partial_prefills
349
+ long_prefill_token_threshold: int = \
350
+ SchedulerConfig.long_prefill_token_threshold
351
+ max_num_seqs: Optional[int] = SchedulerConfig.max_num_seqs
352
+ max_logprobs: int = ModelConfig.max_logprobs
353
+ logprobs_mode: LogprobsMode = ModelConfig.logprobs_mode
354
+ disable_log_stats: bool = False
355
+ revision: Optional[str] = ModelConfig.revision
356
+ code_revision: Optional[str] = ModelConfig.code_revision
357
+ rope_scaling: dict[str, Any] = get_field(ModelConfig, "rope_scaling")
358
+ rope_theta: Optional[float] = ModelConfig.rope_theta
359
+ hf_token: Optional[Union[bool, str]] = ModelConfig.hf_token
360
+ hf_overrides: HfOverrides = get_field(ModelConfig, "hf_overrides")
361
+ tokenizer_revision: Optional[str] = ModelConfig.tokenizer_revision
362
+ quantization: Optional[QuantizationMethods] = ModelConfig.quantization
363
+ enforce_eager: bool = ModelConfig.enforce_eager
364
+ max_seq_len_to_capture: int = ModelConfig.max_seq_len_to_capture
365
+ disable_custom_all_reduce: bool = ParallelConfig.disable_custom_all_reduce
366
+ limit_mm_per_prompt: dict[str, int] = \
367
+ get_field(MultiModalConfig, "limit_per_prompt")
368
+ interleave_mm_strings: bool = MultiModalConfig.interleave_mm_strings
369
+ media_io_kwargs: dict[str, dict[str,
370
+ Any]] = get_field(MultiModalConfig,
371
+ "media_io_kwargs")
372
+ mm_processor_kwargs: Optional[Dict[str, Any]] = \
373
+ MultiModalConfig.mm_processor_kwargs
374
+ disable_mm_preprocessor_cache: bool = False # DEPRECATED
375
+ mm_processor_cache_gb: float = MultiModalConfig.mm_processor_cache_gb
376
+ mm_encoder_tp_mode: MMEncoderTPMode = MultiModalConfig.mm_encoder_tp_mode
377
+ io_processor_plugin: Optional[str] = None
378
+ skip_mm_profiling: bool = MultiModalConfig.skip_mm_profiling
379
+ # LoRA fields
380
+ enable_lora: bool = False
381
+ enable_lora_bias: bool = LoRAConfig.bias_enabled
382
+ max_loras: int = LoRAConfig.max_loras
383
+ max_lora_rank: int = LoRAConfig.max_lora_rank
384
+ default_mm_loras: Optional[Dict[str, str]] = \
385
+ LoRAConfig.default_mm_loras
386
+ fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
387
+ max_cpu_loras: Optional[int] = LoRAConfig.max_cpu_loras
388
+ lora_dtype: Optional[Union[str, torch.dtype]] = LoRAConfig.lora_dtype
389
+ lora_extra_vocab_size: int = LoRAConfig.lora_extra_vocab_size
390
+
391
+ ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
392
+ num_gpu_blocks_override: Optional[
393
+ int] = CacheConfig.num_gpu_blocks_override
394
+ num_lookahead_slots: int = SchedulerConfig.num_lookahead_slots
395
+ model_loader_extra_config: dict = \
396
+ get_field(LoadConfig, "model_loader_extra_config")
397
+ ignore_patterns: Optional[Union[str,
398
+ List[str]]] = LoadConfig.ignore_patterns
399
+ preemption_mode: Optional[str] = SchedulerConfig.preemption_mode
400
+
401
+ scheduler_delay_factor: float = SchedulerConfig.delay_factor
402
+ enable_chunked_prefill: Optional[
403
+ bool] = SchedulerConfig.enable_chunked_prefill
404
+ disable_chunked_mm_input: bool = SchedulerConfig.disable_chunked_mm_input
405
+
406
+ disable_hybrid_kv_cache_manager: bool = (
407
+ SchedulerConfig.disable_hybrid_kv_cache_manager)
408
+
409
+ guided_decoding_backend: GuidedDecodingBackend = DecodingConfig.backend
410
+ guided_decoding_disable_fallback: bool = DecodingConfig.disable_fallback
411
+ guided_decoding_disable_any_whitespace: bool = \
412
+ DecodingConfig.disable_any_whitespace
413
+ guided_decoding_disable_additional_properties: bool = \
414
+ DecodingConfig.disable_additional_properties
415
+ logits_processor_pattern: Optional[
416
+ str] = ModelConfig.logits_processor_pattern
417
+
418
+ speculative_config: Optional[Dict[str, Any]] = None
419
+
420
+ show_hidden_metrics_for_version: Optional[str] = \
421
+ ObservabilityConfig.show_hidden_metrics_for_version
422
+ otlp_traces_endpoint: Optional[str] = \
423
+ ObservabilityConfig.otlp_traces_endpoint
424
+ collect_detailed_traces: Optional[list[DetailedTraceModules]] = \
425
+ ObservabilityConfig.collect_detailed_traces
426
+ disable_async_output_proc: bool = not ModelConfig.use_async_output_proc
427
+ scheduling_policy: SchedulerPolicy = SchedulerConfig.policy
428
+ scheduler_cls: Union[str, Type[object]] = SchedulerConfig.scheduler_cls
429
+
430
+ override_pooler_config: Optional[Union[dict, PoolerConfig]] = \
431
+ ModelConfig.override_pooler_config
432
+ compilation_config: CompilationConfig = \
433
+ get_field(VllmConfig, "compilation_config")
434
+ worker_cls: str = ParallelConfig.worker_cls
435
+ worker_extension_cls: str = ParallelConfig.worker_extension_cls
436
+
437
+ kv_transfer_config: Optional[KVTransferConfig] = None
438
+ kv_events_config: Optional[KVEventsConfig] = None
439
+
440
+ generation_config: str = ModelConfig.generation_config
441
+ enable_sleep_mode: bool = ModelConfig.enable_sleep_mode
442
+ override_generation_config: dict[str, Any] = \
443
+ get_field(ModelConfig, "override_generation_config")
444
+ model_impl: str = ModelConfig.model_impl
445
+ override_attention_dtype: str = ModelConfig.override_attention_dtype
446
+
447
+ calculate_kv_scales: bool = CacheConfig.calculate_kv_scales
448
+ mamba_cache_dtype: MambaDType = CacheConfig.mamba_cache_dtype
449
+ mamba_ssm_cache_dtype: MambaDType = CacheConfig.mamba_ssm_cache_dtype
450
+
451
+ additional_config: dict[str, Any] = \
452
+ get_field(VllmConfig, "additional_config")
453
+ reasoning_parser: str = DecodingConfig.reasoning_backend
454
+
455
+ use_tqdm_on_load: bool = LoadConfig.use_tqdm_on_load
456
+ pt_load_map_location: str = LoadConfig.pt_load_map_location
457
+
458
+ # DEPRECATED
459
+ enable_multimodal_encoder_data_parallel: bool = False
460
+
461
+ logits_processors: Optional[list[Union[
462
+ str, type[LogitsProcessor]]]] = ModelConfig.logits_processors
463
+ """Custom logitproc types"""
464
+
465
+ async_scheduling: bool = SchedulerConfig.async_scheduling
466
+
467
+ kv_sharing_fast_prefill: bool = \
468
+ CacheConfig.kv_sharing_fast_prefill
469
+
470
+ def __post_init__(self):
471
+ # support `EngineArgs(compilation_config={...})`
472
+ # without having to manually construct a
473
+ # CompilationConfig object
474
+ if isinstance(self.compilation_config, dict):
475
+ self.compilation_config = CompilationConfig(
476
+ **self.compilation_config)
477
+ if isinstance(self.eplb_config, dict):
478
+ self.eplb_config = EPLBConfig(**self.eplb_config)
479
+ # Setup plugins
480
+ from vllm.plugins import load_general_plugins
481
+ load_general_plugins()
482
+ # When HF offline mode is enabled, replace the model id with the local model path
483
+ if huggingface_hub.constants.HF_HUB_OFFLINE:
484
+ model_id = self.model
485
+ self.model = get_model_path(self.model, self.revision)
486
+ logger.info(
487
+ "HF_HUB_OFFLINE is True, replace model_id [%s] " \
488
+ "to model_path [%s]",model_id, self.model)
489
+
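# Usage sketch, not part of the packaged file (model name and compilation
# level are assumptions): because __post_init__ coerces plain dicts into the
# corresponding config objects, callers may write, e.g.,
#     EngineArgs(model="facebook/opt-125m", compilation_config={"level": 3})
# instead of constructing a CompilationConfig themselves.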
490
+ @staticmethod
491
+ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
492
+ """Shared CLI arguments for vLLM engine."""
493
+
494
+ # Model arguments
495
+ model_kwargs = get_kwargs(ModelConfig)
496
+ model_group = parser.add_argument_group(
497
+ title="ModelConfig",
498
+ description=ModelConfig.__doc__,
499
+ )
500
+ if not ('serve' in sys.argv[1:] and '--help' in sys.argv[1:]):
501
+ model_group.add_argument("--model", **model_kwargs["model"])
502
+ model_group.add_argument("--runner", **model_kwargs["runner"])
503
+ model_group.add_argument("--convert", **model_kwargs["convert"])
504
+ model_group.add_argument("--task",
505
+ **model_kwargs["task"],
506
+ deprecated=True)
507
+ model_group.add_argument("--tokenizer", **model_kwargs["tokenizer"])
508
+ model_group.add_argument("--tokenizer-mode",
509
+ **model_kwargs["tokenizer_mode"])
510
+ model_group.add_argument("--trust-remote-code",
511
+ **model_kwargs["trust_remote_code"])
512
+ model_group.add_argument("--dtype", **model_kwargs["dtype"])
513
+ model_group.add_argument("--seed", **model_kwargs["seed"])
514
+ model_group.add_argument("--hf-config-path",
515
+ **model_kwargs["hf_config_path"])
516
+ model_group.add_argument("--allowed-local-media-path",
517
+ **model_kwargs["allowed_local_media_path"])
518
+ model_group.add_argument("--revision", **model_kwargs["revision"])
519
+ model_group.add_argument("--code-revision",
520
+ **model_kwargs["code_revision"])
521
+ model_group.add_argument("--rope-scaling",
522
+ **model_kwargs["rope_scaling"])
523
+ model_group.add_argument("--rope-theta", **model_kwargs["rope_theta"])
524
+ model_group.add_argument("--tokenizer-revision",
525
+ **model_kwargs["tokenizer_revision"])
526
+ model_group.add_argument("--max-model-len",
527
+ **model_kwargs["max_model_len"])
528
+ model_group.add_argument("--quantization", "-q",
529
+ **model_kwargs["quantization"])
530
+ model_group.add_argument("--enforce-eager",
531
+ **model_kwargs["enforce_eager"])
532
+ model_group.add_argument("--max-seq-len-to-capture",
533
+ **model_kwargs["max_seq_len_to_capture"])
534
+ model_group.add_argument("--max-logprobs",
535
+ **model_kwargs["max_logprobs"])
536
+ model_group.add_argument("--logprobs-mode",
537
+ choices=[f.value for f in LogprobsMode],
538
+ **model_kwargs["logprobs_mode"])
539
+ model_group.add_argument("--disable-sliding-window",
540
+ **model_kwargs["disable_sliding_window"])
541
+ model_group.add_argument("--disable-cascade-attn",
542
+ **model_kwargs["disable_cascade_attn"])
543
+ model_group.add_argument("--skip-tokenizer-init",
544
+ **model_kwargs["skip_tokenizer_init"])
545
+ model_group.add_argument("--enable-prompt-embeds",
546
+ **model_kwargs["enable_prompt_embeds"])
547
+ model_group.add_argument("--served-model-name",
548
+ **model_kwargs["served_model_name"])
549
+ # This one is a special case because it is the
550
+ # opposite of ModelConfig.use_async_output_proc
551
+ model_group.add_argument(
552
+ "--disable-async-output-proc",
553
+ action="store_true",
554
+ default=EngineArgs.disable_async_output_proc,
555
+ help="Disable async output processing. This may result in "
556
+ "lower performance.")
557
+ model_group.add_argument("--config-format",
558
+ **model_kwargs["config_format"])
559
+ # This one is a special case because it can be a bool
560
+ # or str. TODO: Handle this in get_kwargs
561
+ model_group.add_argument("--hf-token",
562
+ type=str,
563
+ nargs="?",
564
+ const=True,
565
+ default=model_kwargs["hf_token"]["default"],
566
+ help=model_kwargs["hf_token"]["help"])
567
+ model_group.add_argument("--hf-overrides",
568
+ **model_kwargs["hf_overrides"])
569
+ model_group.add_argument("--override-pooler-config",
570
+ **model_kwargs["override_pooler_config"])
571
+ model_group.add_argument("--logits-processor-pattern",
572
+ **model_kwargs["logits_processor_pattern"])
573
+ model_group.add_argument("--generation-config",
574
+ **model_kwargs["generation_config"])
575
+ model_group.add_argument("--override-generation-config",
576
+ **model_kwargs["override_generation_config"])
577
+ model_group.add_argument("--enable-sleep-mode",
578
+ **model_kwargs["enable_sleep_mode"])
579
+ model_group.add_argument("--model-impl",
580
+ choices=[f.value for f in ModelImpl],
581
+ **model_kwargs["model_impl"])
582
+ model_group.add_argument("--override-attention-dtype",
583
+ **model_kwargs["override_attention_dtype"])
584
+ model_group.add_argument("--logits-processors",
585
+ **model_kwargs["logits_processors"])
586
+ model_group.add_argument("--io-processor-plugin",
587
+ **model_kwargs["io_processor_plugin"])
588
+
589
+ # Model loading arguments
590
+ load_kwargs = get_kwargs(LoadConfig)
591
+ load_group = parser.add_argument_group(
592
+ title="LoadConfig",
593
+ description=LoadConfig.__doc__,
594
+ )
595
+ load_group.add_argument("--load-format", **load_kwargs["load_format"])
596
+ load_group.add_argument("--download-dir",
597
+ **load_kwargs["download_dir"])
598
+ load_group.add_argument("--safetensors-load-strategy",
599
+ **load_kwargs["safetensors_load_strategy"])
600
+ load_group.add_argument("--model-loader-extra-config",
601
+ **load_kwargs["model_loader_extra_config"])
602
+ load_group.add_argument("--ignore-patterns",
603
+ **load_kwargs["ignore_patterns"])
604
+ load_group.add_argument("--use-tqdm-on-load",
605
+ **load_kwargs["use_tqdm_on_load"])
606
+ load_group.add_argument('--pt-load-map-location',
607
+ **load_kwargs["pt_load_map_location"])
608
+
609
+ # Guided decoding arguments
610
+ guided_decoding_kwargs = get_kwargs(DecodingConfig)
611
+ guided_decoding_group = parser.add_argument_group(
612
+ title="DecodingConfig",
613
+ description=DecodingConfig.__doc__,
614
+ )
615
+ guided_decoding_group.add_argument("--guided-decoding-backend",
616
+ **guided_decoding_kwargs["backend"])
617
+ guided_decoding_group.add_argument(
618
+ "--guided-decoding-disable-fallback",
619
+ **guided_decoding_kwargs["disable_fallback"])
620
+ guided_decoding_group.add_argument(
621
+ "--guided-decoding-disable-any-whitespace",
622
+ **guided_decoding_kwargs["disable_any_whitespace"])
623
+ guided_decoding_group.add_argument(
624
+ "--guided-decoding-disable-additional-properties",
625
+ **guided_decoding_kwargs["disable_additional_properties"])
626
+ guided_decoding_group.add_argument(
627
+ "--reasoning-parser",
628
+ # This choice is a special case because it's not static
629
+ choices=list(ReasoningParserManager.reasoning_parsers),
630
+ **guided_decoding_kwargs["reasoning_backend"])
631
+
632
+ # Parallel arguments
633
+ parallel_kwargs = get_kwargs(ParallelConfig)
634
+ parallel_group = parser.add_argument_group(
635
+ title="ParallelConfig",
636
+ description=ParallelConfig.__doc__,
637
+ )
638
+ parallel_group.add_argument(
639
+ "--distributed-executor-backend",
640
+ **parallel_kwargs["distributed_executor_backend"])
641
+ parallel_group.add_argument(
642
+ "--pipeline-parallel-size", "-pp",
643
+ **parallel_kwargs["pipeline_parallel_size"])
644
+ parallel_group.add_argument("--tensor-parallel-size", "-tp",
645
+ **parallel_kwargs["tensor_parallel_size"])
646
+ parallel_group.add_argument(
647
+ "--decode-context-parallel-size", "-dcp",
648
+ **parallel_kwargs["decode_context_parallel_size"])
649
+ parallel_group.add_argument("--data-parallel-size", "-dp",
650
+ **parallel_kwargs["data_parallel_size"])
651
+ parallel_group.add_argument(
652
+ '--data-parallel-rank',
653
+ '-dpn',
654
+ type=int,
655
+ help='Data parallel rank of this instance. '
656
+ 'When set, enables external load balancer mode.')
657
+ parallel_group.add_argument('--data-parallel-start-rank',
658
+ '-dpr',
659
+ type=int,
660
+ help='Starting data parallel rank '
661
+ 'for secondary nodes.')
662
+ parallel_group.add_argument('--data-parallel-size-local',
663
+ '-dpl',
664
+ type=int,
665
+ help='Number of data parallel replicas '
666
+ 'to run on this node.')
667
+ parallel_group.add_argument('--data-parallel-address',
668
+ '-dpa',
669
+ type=str,
670
+ help='Address of data parallel cluster '
671
+ 'head-node.')
672
+ parallel_group.add_argument('--data-parallel-rpc-port',
673
+ '-dpp',
674
+ type=int,
675
+ help='Port for data parallel RPC '
676
+ 'communication.')
677
+ parallel_group.add_argument('--data-parallel-backend',
678
+ '-dpb',
679
+ type=str,
680
+ default='mp',
681
+ help='Backend for data parallel, either '
682
+ '"mp" or "ray".')
683
+ parallel_group.add_argument(
684
+ "--data-parallel-hybrid-lb",
685
+ **parallel_kwargs["data_parallel_hybrid_lb"])
686
+ parallel_group.add_argument(
687
+ "--enable-expert-parallel",
688
+ **parallel_kwargs["enable_expert_parallel"])
689
+ parallel_group.add_argument("--enable-eplb",
690
+ **parallel_kwargs["enable_eplb"])
691
+ parallel_group.add_argument("--eplb-config",
692
+ **parallel_kwargs["eplb_config"])
693
+ parallel_group.add_argument(
694
+ "--num-redundant-experts",
695
+ type=int,
696
+ help=
697
+ "[DEPRECATED] --num-redundant-experts will be removed in v0.12.0.",
698
+ deprecated=True)
699
+ parallel_group.add_argument(
700
+ "--eplb-window-size",
701
+ type=int,
702
+ help="[DEPRECATED] --eplb-window-size will be removed in v0.12.0.",
703
+ deprecated=True)
704
+ parallel_group.add_argument(
705
+ "--eplb-step-interval",
706
+ type=int,
707
+ help=
708
+ "[DEPRECATED] --eplb-step-interval will be removed in v0.12.0.",
709
+ deprecated=True)
710
+ parallel_group.add_argument(
711
+ "--eplb-log-balancedness",
712
+ action=argparse.BooleanOptionalAction,
713
+ help=
714
+ "[DEPRECATED] --eplb-log-balancedness will be removed in v0.12.0.",
715
+ deprecated=True)
716
+
717
+ parallel_group.add_argument(
718
+ "--max-parallel-loading-workers",
719
+ **parallel_kwargs["max_parallel_loading_workers"])
720
+ parallel_group.add_argument(
721
+ "--ray-workers-use-nsight",
722
+ **parallel_kwargs["ray_workers_use_nsight"])
723
+ parallel_group.add_argument(
724
+ "--disable-custom-all-reduce",
725
+ **parallel_kwargs["disable_custom_all_reduce"])
726
+ parallel_group.add_argument("--worker-cls",
727
+ **parallel_kwargs["worker_cls"])
728
+ parallel_group.add_argument("--worker-extension-cls",
729
+ **parallel_kwargs["worker_extension_cls"])
730
+ parallel_group.add_argument(
731
+ "--enable-multimodal-encoder-data-parallel",
732
+ action="store_true",
733
+ deprecated=True)
734
+
735
+ # KV cache arguments
736
+ cache_kwargs = get_kwargs(CacheConfig)
737
+ cache_group = parser.add_argument_group(
738
+ title="CacheConfig",
739
+ description=CacheConfig.__doc__,
740
+ )
741
+ cache_group.add_argument("--block-size", **cache_kwargs["block_size"])
742
+ cache_group.add_argument("--gpu-memory-utilization",
743
+ **cache_kwargs["gpu_memory_utilization"])
744
+ cache_group.add_argument("--kv-cache-memory-bytes",
745
+ **cache_kwargs["kv_cache_memory_bytes"])
746
+ cache_group.add_argument("--swap-space", **cache_kwargs["swap_space"])
747
+ cache_group.add_argument("--kv-cache-dtype",
748
+ **cache_kwargs["cache_dtype"])
749
+ cache_group.add_argument("--num-gpu-blocks-override",
750
+ **cache_kwargs["num_gpu_blocks_override"])
751
+ cache_group.add_argument("--enable-prefix-caching",
752
+ **cache_kwargs["enable_prefix_caching"])
753
+ cache_group.add_argument("--prefix-caching-hash-algo",
754
+ **cache_kwargs["prefix_caching_hash_algo"])
755
+ cache_group.add_argument("--cpu-offload-gb",
756
+ **cache_kwargs["cpu_offload_gb"])
757
+ cache_group.add_argument("--calculate-kv-scales",
758
+ **cache_kwargs["calculate_kv_scales"])
759
+ cache_group.add_argument("--kv-sharing-fast-prefill",
760
+ **cache_kwargs["kv_sharing_fast_prefill"])
761
+ cache_group.add_argument("--mamba-cache-dtype",
762
+ **cache_kwargs["mamba_cache_dtype"])
763
+ cache_group.add_argument("--mamba-ssm-cache-dtype",
764
+ **cache_kwargs["mamba_ssm_cache_dtype"])
765
+
766
+ # Multimodal related configs
767
+ multimodal_kwargs = get_kwargs(MultiModalConfig)
768
+ multimodal_group = parser.add_argument_group(
769
+ title="MultiModalConfig",
770
+ description=MultiModalConfig.__doc__,
771
+ )
772
+ multimodal_group.add_argument("--limit-mm-per-prompt",
773
+ **multimodal_kwargs["limit_per_prompt"])
774
+ multimodal_group.add_argument("--media-io-kwargs",
775
+ **multimodal_kwargs["media_io_kwargs"])
776
+ multimodal_group.add_argument(
777
+ "--mm-processor-kwargs",
778
+ **multimodal_kwargs["mm_processor_kwargs"])
779
+ multimodal_group.add_argument(
780
+ "--mm-processor-cache-gb",
781
+ **multimodal_kwargs["mm_processor_cache_gb"])
782
+ multimodal_group.add_argument("--disable-mm-preprocessor-cache",
783
+ action="store_true",
784
+ deprecated=True)
785
+ multimodal_group.add_argument(
786
+ "--mm-encoder-tp-mode", **multimodal_kwargs["mm_encoder_tp_mode"])
787
+ multimodal_group.add_argument(
788
+ "--interleave-mm-strings",
789
+ **multimodal_kwargs["interleave_mm_strings"])
790
+ multimodal_group.add_argument("--skip-mm-profiling",
791
+ **multimodal_kwargs["skip_mm_profiling"])
792
+
793
+ # LoRA related configs
794
+ lora_kwargs = get_kwargs(LoRAConfig)
795
+ lora_group = parser.add_argument_group(
796
+ title="LoRAConfig",
797
+ description=LoRAConfig.__doc__,
798
+ )
799
+ lora_group.add_argument(
800
+ "--enable-lora",
801
+ action=argparse.BooleanOptionalAction,
802
+ help="If True, enable handling of LoRA adapters.")
803
+ lora_group.add_argument("--enable-lora-bias",
804
+ **lora_kwargs["bias_enabled"])
805
+ lora_group.add_argument("--max-loras", **lora_kwargs["max_loras"])
806
+ lora_group.add_argument("--max-lora-rank",
807
+ **lora_kwargs["max_lora_rank"])
808
+ lora_group.add_argument("--lora-extra-vocab-size",
809
+ **lora_kwargs["lora_extra_vocab_size"])
810
+ lora_group.add_argument(
811
+ "--lora-dtype",
812
+ **lora_kwargs["lora_dtype"],
813
+ )
814
+ lora_group.add_argument("--max-cpu-loras",
815
+ **lora_kwargs["max_cpu_loras"])
816
+ lora_group.add_argument("--fully-sharded-loras",
817
+ **lora_kwargs["fully_sharded_loras"])
818
+ lora_group.add_argument("--default-mm-loras",
819
+ **lora_kwargs["default_mm_loras"])
820
+
821
+ # Observability arguments
822
+ observability_kwargs = get_kwargs(ObservabilityConfig)
823
+ observability_group = parser.add_argument_group(
824
+ title="ObservabilityConfig",
825
+ description=ObservabilityConfig.__doc__,
826
+ )
827
+ observability_group.add_argument(
828
+ "--show-hidden-metrics-for-version",
829
+ **observability_kwargs["show_hidden_metrics_for_version"])
830
+ observability_group.add_argument(
831
+ "--otlp-traces-endpoint",
832
+ **observability_kwargs["otlp_traces_endpoint"])
833
+ # TODO: generalise this special case
834
+ choices = observability_kwargs["collect_detailed_traces"]["choices"]
835
+ metavar = f"{{{','.join(choices)}}}"
836
+ observability_kwargs["collect_detailed_traces"]["metavar"] = metavar
837
+ observability_kwargs["collect_detailed_traces"]["choices"] += [
838
+ ",".join(p)
839
+ for p in permutations(get_args(DetailedTraceModules), r=2)
840
+ ]
841
+ observability_group.add_argument(
842
+ "--collect-detailed-traces",
843
+ **observability_kwargs["collect_detailed_traces"])
844
+
845
+ # Scheduler arguments
846
+ scheduler_kwargs = get_kwargs(SchedulerConfig)
847
+ scheduler_group = parser.add_argument_group(
848
+ title="SchedulerConfig",
849
+ description=SchedulerConfig.__doc__,
850
+ )
851
+ scheduler_group.add_argument(
852
+ "--max-num-batched-tokens",
853
+ **scheduler_kwargs["max_num_batched_tokens"])
854
+ scheduler_group.add_argument("--max-num-seqs",
855
+ **scheduler_kwargs["max_num_seqs"])
856
+ scheduler_group.add_argument(
857
+ "--max-num-partial-prefills",
858
+ **scheduler_kwargs["max_num_partial_prefills"])
859
+ scheduler_group.add_argument(
860
+ "--max-long-partial-prefills",
861
+ **scheduler_kwargs["max_long_partial_prefills"])
862
+ scheduler_group.add_argument('--cuda-graph-sizes',
863
+ **scheduler_kwargs["cuda_graph_sizes"])
864
+ scheduler_group.add_argument(
865
+ "--long-prefill-token-threshold",
866
+ **scheduler_kwargs["long_prefill_token_threshold"])
867
+ scheduler_group.add_argument("--num-lookahead-slots",
868
+ **scheduler_kwargs["num_lookahead_slots"])
869
+ scheduler_group.add_argument("--scheduler-delay-factor",
870
+ **scheduler_kwargs["delay_factor"])
871
+ scheduler_group.add_argument("--preemption-mode",
872
+ **scheduler_kwargs["preemption_mode"])
873
+ # multi-step scheduling has been removed; corresponding arguments
874
+ # are no longer supported.
875
+ scheduler_group.add_argument("--scheduling-policy",
876
+ **scheduler_kwargs["policy"])
877
+ scheduler_group.add_argument(
878
+ "--enable-chunked-prefill",
879
+ **scheduler_kwargs["enable_chunked_prefill"])
880
+ scheduler_group.add_argument(
881
+ "--disable-chunked-mm-input",
882
+ **scheduler_kwargs["disable_chunked_mm_input"])
883
+ scheduler_group.add_argument("--scheduler-cls",
884
+ **scheduler_kwargs["scheduler_cls"])
885
+ scheduler_group.add_argument(
886
+ "--disable-hybrid-kv-cache-manager",
887
+ **scheduler_kwargs["disable_hybrid_kv_cache_manager"])
888
+ scheduler_group.add_argument("--async-scheduling",
889
+ **scheduler_kwargs["async_scheduling"])
890
+
891
+ # vLLM arguments
892
+ vllm_kwargs = get_kwargs(VllmConfig)
893
+ vllm_group = parser.add_argument_group(
894
+ title="VllmConfig",
895
+ description=VllmConfig.__doc__,
896
+ )
897
+ # We construct SpeculativeConfig using fields from other configs in
898
+ # create_engine_config. So we set the type to a JSON string here to
899
+ # delay the Pydantic validation that comes with SpeculativeConfig.
900
+ vllm_kwargs["speculative_config"]["type"] = optional_type(json.loads)
901
+ vllm_group.add_argument("--speculative-config",
902
+ **vllm_kwargs["speculative_config"])
903
+ vllm_group.add_argument("--kv-transfer-config",
904
+ **vllm_kwargs["kv_transfer_config"])
905
+ vllm_group.add_argument('--kv-events-config',
906
+ **vllm_kwargs["kv_events_config"])
907
+ vllm_group.add_argument("--compilation-config", "-O",
908
+ **vllm_kwargs["compilation_config"])
909
+ vllm_group.add_argument("--additional-config",
910
+ **vllm_kwargs["additional_config"])
911
+
912
+ # Other arguments
913
+ parser.add_argument('--disable-log-stats',
914
+ action='store_true',
915
+ help='Disable logging statistics.')
916
+
917
+ return parser
918
+
919
+ @classmethod
920
+ def from_cli_args(cls, args: argparse.Namespace):
921
+ # Get the list of attributes of this dataclass.
922
+ attrs = [attr.name for attr in dataclasses.fields(cls)]
923
+ # Set the attributes from the parsed arguments.
924
+ engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
925
+ return engine_args
926
+
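# Usage sketch, not part of the packaged file (argv values are assumptions):
# add_cli_args() and from_cli_args() are normally used together, e.g.
#     parser = FlexibleArgumentParser()
#     parser = EngineArgs.add_cli_args(parser)
#     args = parser.parse_args(["--model", "facebook/opt-125m", "-tp", "2"])
#     engine_args = EngineArgs.from_cli_args(args)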
927
+ def create_model_config(self) -> ModelConfig:
928
+ # gguf file needs a specific model loader and doesn't use hf_repo
929
+ if check_gguf_file(self.model):
930
+ self.quantization = self.load_format = "gguf"
931
+
932
+ # NOTE: This is to allow model loading from S3 in CI
933
+ if (not isinstance(self, AsyncEngineArgs) and envs.VLLM_CI_USE_S3
934
+ and self.model in MODELS_ON_S3 and self.load_format == "auto"):
935
+ self.model = f"{MODEL_WEIGHTS_S3_BUCKET}/{self.model}"
936
+ self.load_format = "runai_streamer"
937
+
938
+ if self.disable_mm_preprocessor_cache:
939
+ logger.warning(
940
+ "`--disable-mm-preprocessor-cache` is deprecated "
941
+ "and will be removed in v0.13. "
942
+ "Please use `--mm-processor-cache-gb 0` instead.", )
943
+
944
+ self.mm_processor_cache_gb = 0
945
+ elif envs.VLLM_MM_INPUT_CACHE_GIB != 4:
946
+ logger.warning(
947
+ "VLLM_MM_INPUT_CACHE_GIB` is deprecated "
948
+ "and will be removed in v0.13. "
949
+ "Please use `--mm-processor-cache-gb %d` instead.",
950
+ envs.VLLM_MM_INPUT_CACHE_GIB,
951
+ )
952
+
953
+ self.mm_processor_cache_gb = envs.VLLM_MM_INPUT_CACHE_GIB
954
+
955
+ if self.enable_multimodal_encoder_data_parallel:
956
+ logger.warning(
957
+ "--enable-multimodal-encoder-data-parallel` is deprecated "
958
+ "and will be removed in v0.13. "
959
+ "Please use `--mm-encoder-tp-mode data` instead.")
960
+
961
+ self.mm_encoder_tp_mode = "data"
962
+
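# At this point the deprecated knobs above have been folded into their
# replacements: --disable-mm-preprocessor-cache maps to
# mm_processor_cache_gb=0, a non-default VLLM_MM_INPUT_CACHE_GIB is migrated
# to mm_processor_cache_gb, and --enable-multimodal-encoder-data-parallel
# maps to mm_encoder_tp_mode="data".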
963
+ return ModelConfig(
964
+ model=self.model,
965
+ hf_config_path=self.hf_config_path,
966
+ runner=self.runner,
967
+ convert=self.convert,
968
+ task=self.task,
969
+ tokenizer=self.tokenizer,
970
+ tokenizer_mode=self.tokenizer_mode,
971
+ trust_remote_code=self.trust_remote_code,
972
+ allowed_local_media_path=self.allowed_local_media_path,
973
+ dtype=self.dtype,
974
+ seed=self.seed,
975
+ revision=self.revision,
976
+ code_revision=self.code_revision,
977
+ rope_scaling=self.rope_scaling,
978
+ rope_theta=self.rope_theta,
979
+ hf_token=self.hf_token,
980
+ hf_overrides=self.hf_overrides,
981
+ tokenizer_revision=self.tokenizer_revision,
982
+ max_model_len=self.max_model_len,
983
+ quantization=self.quantization,
984
+ enforce_eager=self.enforce_eager,
985
+ max_seq_len_to_capture=self.max_seq_len_to_capture,
986
+ max_logprobs=self.max_logprobs,
987
+ logprobs_mode=self.logprobs_mode,
988
+ disable_sliding_window=self.disable_sliding_window,
989
+ disable_cascade_attn=self.disable_cascade_attn,
990
+ skip_tokenizer_init=self.skip_tokenizer_init,
991
+ enable_prompt_embeds=self.enable_prompt_embeds,
992
+ served_model_name=self.served_model_name,
993
+ limit_mm_per_prompt=self.limit_mm_per_prompt,
994
+ interleave_mm_strings=self.interleave_mm_strings,
995
+ media_io_kwargs=self.media_io_kwargs,
996
+ skip_mm_profiling=self.skip_mm_profiling,
997
+ use_async_output_proc=not self.disable_async_output_proc,
998
+ config_format=self.config_format,
999
+ mm_processor_kwargs=self.mm_processor_kwargs,
1000
+ mm_processor_cache_gb=self.mm_processor_cache_gb,
1001
+ mm_encoder_tp_mode=self.mm_encoder_tp_mode,
1002
+ override_pooler_config=self.override_pooler_config,
1003
+ logits_processor_pattern=self.logits_processor_pattern,
1004
+ generation_config=self.generation_config,
1005
+ override_generation_config=self.override_generation_config,
1006
+ enable_sleep_mode=self.enable_sleep_mode,
1007
+ model_impl=self.model_impl,
1008
+ override_attention_dtype=self.override_attention_dtype,
1009
+ logits_processors=self.logits_processors,
1010
+ io_processor_plugin=self.io_processor_plugin,
1011
+ )
1012
+
1013
+ def validate_tensorizer_args(self):
1014
+ from vllm.model_executor.model_loader.tensorizer import (
1015
+ TensorizerConfig)
1016
+ for key in self.model_loader_extra_config:
1017
+ if key in TensorizerConfig._fields:
1018
+ self.model_loader_extra_config["tensorizer_config"][
1019
+ key] = self.model_loader_extra_config[key]
1020
+
1021
+ def create_load_config(self) -> LoadConfig:
1022
+
1023
+ if self.quantization == "bitsandbytes":
1024
+ self.load_format = "bitsandbytes"
1025
+
1026
+ if self.load_format == "tensorizer":
1027
+ if hasattr(self.model_loader_extra_config, "to_serializable"):
1028
+ self.model_loader_extra_config = (
1029
+ self.model_loader_extra_config.to_serializable())
1030
+ self.model_loader_extra_config["tensorizer_config"] = {}
1031
+ self.model_loader_extra_config["tensorizer_config"][
1032
+ "tensorizer_dir"] = self.model
1033
+ self.validate_tensorizer_args()
1034
+
1035
+ return LoadConfig(
1036
+ load_format=self.load_format,
1037
+ download_dir=self.download_dir,
1038
+ safetensors_load_strategy=self.safetensors_load_strategy,
1039
+ device="cpu"
1040
+ if is_online_quantization(self.quantization) else None,
1041
+ model_loader_extra_config=self.model_loader_extra_config,
1042
+ ignore_patterns=self.ignore_patterns,
1043
+ use_tqdm_on_load=self.use_tqdm_on_load,
1044
+ pt_load_map_location=self.pt_load_map_location,
1045
+ )
1046
+
1047
+ def create_speculative_config(
1048
+ self,
1049
+ target_model_config: ModelConfig,
1050
+ target_parallel_config: ParallelConfig,
1051
+ enable_chunked_prefill: bool,
1052
+ disable_log_stats: bool,
1053
+ ) -> Optional["SpeculativeConfig"]:
1054
+ """Initializes and returns a SpeculativeConfig object based on
1055
+ `speculative_config`.
1056
+
1057
+ This function utilizes `speculative_config` to create a
1058
+ SpeculativeConfig object. The `speculative_config` can either be
1059
+ provided as a JSON string input via CLI arguments or directly as a
1060
+ dictionary from the engine.
1061
+ """
1062
+
1063
+ from vllm.transformers_utils.config import get_config
1064
+ from vllm.transformers_utils.configs.speculators.base import (
1065
+ SpeculatorsConfig)
1066
+
1067
+ if self.speculative_config is None:
1068
+ hf_config = get_config(
1069
+ self.hf_config_path or target_model_config.model,
1070
+ self.trust_remote_code, self.revision, self.code_revision,
1071
+ self.config_format)
1072
+
1073
+ # if loading a SpeculatorsConfig, load the speculative_config
1074
+ # details from the config directly
1075
+ # no user input required / expected
1076
+ if isinstance(hf_config, SpeculatorsConfig):
1077
+ # No speculative_config was given, so build one from the SpeculatorsConfig
1078
+ self.speculative_config = {}
1079
+ self.speculative_config[
1080
+ "num_speculative_tokens"] = hf_config.num_lookahead_tokens
1081
+ self.speculative_config["model"] = target_model_config.model
1082
+ self.speculative_config["method"] = hf_config.method
1083
+ else:
1084
+ return None
1085
+
1086
+ # Note(Shangming): These parameters are not obtained from the cli arg
1087
+ # '--speculative-config' and must be passed in when creating the engine
1088
+ # config.
1089
+ self.speculative_config.update({
1090
+ "target_model_config": target_model_config,
1091
+ "target_parallel_config": target_parallel_config,
1092
+ "enable_chunked_prefill": enable_chunked_prefill,
1093
+ "disable_log_stats": disable_log_stats,
1094
+ })
1095
+ return SpeculativeConfig(**self.speculative_config)
1096
+
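# Usage sketch, not part of the packaged file (field values are assumptions):
# on the CLI the same settings are usually given as a JSON string, e.g.
#     --speculative-config '{"method": "ngram", "num_speculative_tokens": 5}'
# which add_cli_args() parses with json.loads before this method merges in
# the target model and parallel configs.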
1097
+ def create_engine_config(
1098
+ self,
1099
+ usage_context: Optional[UsageContext] = None,
1100
+ headless: bool = False,
1101
+ ) -> VllmConfig:
1102
+ """
1103
+ Create the VllmConfig.
1104
+
1105
+ NOTE: for autoselection of V0 vs V1 engine, we need to
1106
+ create the ModelConfig first, since ModelConfig's attrs
1107
+ (e.g. the model arch) are needed to make the decision.
1108
+
1109
+ This function sets VLLM_USE_V1=X if VLLM_USE_V1 is
1110
+ unspecified by the user.
1111
+
1112
+ If VLLM_USE_V1 is specified by the user but the VllmConfig
1113
+ is incompatible, we raise an error.
1114
+ """
1115
+ current_platform.pre_register_and_update()
1116
+
1117
+ device_config = DeviceConfig(
1118
+ device=cast(Device, current_platform.device_type))
1119
+ model_config = self.create_model_config()
1120
+
1121
+ # * If VLLM_USE_V1 is unset, we enable V1 for "supported features"
1122
+ # and fall back to V0 for experimental or unsupported features.
1123
+ # * If VLLM_USE_V1=1, we enable V1 for supported + experimental
1124
+ # features and raise error for unsupported features.
1125
+ # * If VLLM_USE_V1=0, we disable V1.
1126
+ use_v1 = False
1127
+ try_v1 = envs.VLLM_USE_V1 or not envs.is_set("VLLM_USE_V1")
1128
+ if try_v1 and self._is_v1_supported_oracle(model_config):
1129
+ use_v1 = True
1130
+
1131
+ # If user explicitly set VLLM_USE_V1, sanity check we respect it.
1132
+ if envs.is_set("VLLM_USE_V1"):
1133
+ assert use_v1 == envs.VLLM_USE_V1
1134
+ # Otherwise, set the VLLM_USE_V1 variable globally.
1135
+ else:
1136
+ envs.set_vllm_use_v1(use_v1)
1137
+
1138
+ # Set default arguments for V0 or V1 Engine.
1139
+ if use_v1:
1140
+ self._set_default_args_v1(usage_context, model_config)
1141
+ # Disable chunked prefill for POWER (ppc64le)/ARM/s390x CPUs in V1
1142
+ if current_platform.is_cpu(
1143
+ ) and current_platform.get_cpu_architecture() in (
1144
+ CpuArchEnum.POWERPC, CpuArchEnum.S390X, CpuArchEnum.ARM):
1145
+ logger.info(
1146
+ "Chunked prefill is not supported for ARM and POWER "
1147
+ "and S390X CPUs; "
1148
+ "disabling it for V1 backend.")
1149
+ self.enable_chunked_prefill = False
1150
+ else:
1151
+ self._set_default_args_v0(model_config)
1152
+ assert self.enable_chunked_prefill is not None
1153
+
1154
+ if envs.VLLM_ATTENTION_BACKEND in [STR_DUAL_CHUNK_FLASH_ATTN_VAL]:
1155
+ assert self.enforce_eager, (
1156
+ "Cuda graph is not supported with DualChunkFlashAttention. "
1157
+ "To run the model in eager mode, set 'enforce_eager=True' "
1158
+ "or use '--enforce-eager' in the CLI.")
1159
+ assert current_platform.is_cuda(), (
1160
+ "DualChunkFlashAttention is only supported on CUDA platform.")
1161
+ assert not use_v1, (
1162
+ "DualChunkFlashAttention is not supported on V1 engine. "
1163
+ "To run the model in V0 engine, try set 'VLLM_USE_V1=0'")
1164
+
1165
+ sliding_window: Optional[int] = None
1166
+ if not is_interleaved(model_config.hf_text_config):
1167
+ # Only set CacheConfig.sliding_window if the model is all sliding
1168
+ # window. Otherwise CacheConfig.sliding_window will override the
1169
+ # global layers in interleaved sliding window models.
1170
+ sliding_window = model_config.get_sliding_window()
1171
+
1172
+ # Note(hc): In the current implementation of decode context
1173
+ # parallel(DCP), tp_size needs to be divisible by dcp_size,
1174
+ # because the world size does not change by dcp, it simply
1175
+ # reuses the GPUs of TP group, and split one TP group into
1176
+ # tp_size//dcp_size DCP groups.
1177
+ assert self.tensor_parallel_size % self.decode_context_parallel_size \
1178
+ == 0, (
1179
+ f"tp_size={self.tensor_parallel_size} must be divisible by"
1180
+ f"dcp_size={self.decode_context_parallel_size}."
1181
+ )
1182
+
1183
+ cache_config = CacheConfig(
1184
+ block_size=self.block_size,
1185
+ gpu_memory_utilization=self.gpu_memory_utilization,
1186
+ kv_cache_memory_bytes=self.kv_cache_memory_bytes,
1187
+ swap_space=self.swap_space,
1188
+ cache_dtype=self.kv_cache_dtype,
1189
+ is_attention_free=model_config.is_attention_free,
1190
+ num_gpu_blocks_override=self.num_gpu_blocks_override,
1191
+ sliding_window=sliding_window,
1192
+ enable_prefix_caching=self.enable_prefix_caching,
1193
+ prefix_caching_hash_algo=self.prefix_caching_hash_algo,
1194
+ cpu_offload_gb=self.cpu_offload_gb,
1195
+ calculate_kv_scales=self.calculate_kv_scales,
1196
+ kv_sharing_fast_prefill=self.kv_sharing_fast_prefill,
1197
+ mamba_cache_dtype=self.mamba_cache_dtype,
1198
+ mamba_ssm_cache_dtype=self.mamba_ssm_cache_dtype,
1199
+ )
1200
+
1201
+ ray_runtime_env = None
1202
+ if is_ray_initialized():
1203
+ # Ray Serve LLM calls `create_engine_config` in the context
1204
+ # of a Ray task, therefore we check is_ray_initialized()
1205
+ # as opposed to is_in_ray_actor().
1206
+ import ray
1207
+ ray_runtime_env = ray.get_runtime_context().runtime_env
1208
+ logger.info("Using ray runtime env: %s", ray_runtime_env)
1209
+
1210
+ # Get the current placement group if Ray is initialized and
1211
+ # we are in a Ray actor. If so, then the placement group will be
1212
+ # passed to spawned processes.
1213
+ placement_group = None
1214
+ if is_in_ray_actor():
1215
+ import ray
1216
+
1217
+ # This call initializes Ray automatically if it is not initialized,
1218
+ # but we should not do this here.
1219
+ placement_group = ray.util.get_current_placement_group()
1220
+
1221
+ assert not headless or not self.data_parallel_hybrid_lb, (
1222
+ "data_parallel_hybrid_lb is not applicable in "
1223
+ "headless mode")
1224
+
1225
+ data_parallel_external_lb = self.data_parallel_rank is not None
1226
+ # An explicit DP rank implies local DP size 1, i.e. pure-external LB.
1227
+ if data_parallel_external_lb:
1228
+ assert self.data_parallel_size_local in (1, None), (
1229
+ "data_parallel_size_local must be 1 when data_parallel_rank "
1230
+ "is set")
1231
+ data_parallel_size_local = 1
1232
+ # Use full external lb if we have local_size of 1.
1233
+ self.data_parallel_hybrid_lb = False
1234
+ elif self.data_parallel_size_local is not None:
1235
+ data_parallel_size_local = self.data_parallel_size_local
1236
+
1237
+ if self.data_parallel_start_rank and not headless:
1238
+ # Infer hybrid LB mode.
1239
+ self.data_parallel_hybrid_lb = True
1240
+
1241
+ if self.data_parallel_hybrid_lb and data_parallel_size_local == 1:
1242
+ # Use full external lb if we have local_size of 1.
1243
+ data_parallel_external_lb = True
1244
+ self.data_parallel_hybrid_lb = False
1245
+
1246
+ if data_parallel_size_local == self.data_parallel_size:
1247
+ # Disable hybrid LB mode if set for a single node
1248
+ self.data_parallel_hybrid_lb = False
1249
+
1250
+ self.data_parallel_rank = self.data_parallel_start_rank or 0
1251
+ else:
1252
+ assert not self.data_parallel_hybrid_lb, (
1253
+ "data_parallel_size_local must be set to use "
1254
+ "data_parallel_hybrid_lb.")
1255
+
1256
+ # Local DP size defaults to global DP size if not set.
1257
+ data_parallel_size_local = self.data_parallel_size
1258
+
1259
+ # DP address, used in multi-node case for torch distributed group
1260
+ # and ZMQ sockets.
1261
+ if self.data_parallel_address is None:
1262
+ if self.data_parallel_backend == "ray":
1263
+ host_ip = get_ip()
1264
+ logger.info(
1265
+ "Using host IP %s as ray-based data parallel address",
1266
+ host_ip)
1267
+ data_parallel_address = host_ip
1268
+ else:
1269
+ assert self.data_parallel_backend == "mp", (
1270
+ "data_parallel_backend can only be ray or mp, got %s",
1271
+ f"got {self.data_parallel_backend}")
1272
+ data_parallel_address = ParallelConfig.data_parallel_master_ip
1273
+ else:
1274
+ data_parallel_address = self.data_parallel_address
1275
+
1276
+ # This port is only used when there are remote data parallel engines,
1277
+ # otherwise the local IPC transport is used.
1278
+ data_parallel_rpc_port = self.data_parallel_rpc_port if (
1279
+ self.data_parallel_rpc_port
1280
+ is not None) else ParallelConfig.data_parallel_rpc_port
1281
+
1282
+ if self.async_scheduling:
+ # Async scheduling does not work with the uniprocess backend.
+ if self.distributed_executor_backend is None:
+ self.distributed_executor_backend = "mp"
+ logger.info("Using mp-based distributed executor backend "
+ "for async scheduling.")
+ if self.distributed_executor_backend == "uni":
+ raise ValueError("Async scheduling is not supported with "
+ "uni-process backend.")
+ if self.pipeline_parallel_size > 1:
+ raise ValueError("Async scheduling is not supported with "
+ "pipeline-parallel-size > 1.")
+
+ # Currently, async scheduling does not support speculative decoding.
+ # TODO(woosuk): Support it.
+ if self.speculative_config is not None:
+ raise ValueError(
+ "Currently, speculative decoding is not supported with "
+ "async scheduling.")
+
+ # Forward the deprecated CLI args to the EPLB config.
+ if self.num_redundant_experts is not None:
+ self.eplb_config.num_redundant_experts = self.num_redundant_experts
+ if self.eplb_window_size is not None:
+ self.eplb_config.window_size = self.eplb_window_size
+ if self.eplb_step_interval is not None:
+ self.eplb_config.step_interval = self.eplb_step_interval
+ if self.eplb_log_balancedness is not None:
+ self.eplb_config.log_balancedness = self.eplb_log_balancedness
+
+ parallel_config = ParallelConfig(
+ pipeline_parallel_size=self.pipeline_parallel_size,
+ tensor_parallel_size=self.tensor_parallel_size,
+ data_parallel_size=self.data_parallel_size,
+ data_parallel_rank=self.data_parallel_rank or 0,
+ data_parallel_external_lb=data_parallel_external_lb,
+ data_parallel_size_local=data_parallel_size_local,
+ data_parallel_master_ip=data_parallel_address,
+ data_parallel_rpc_port=data_parallel_rpc_port,
+ data_parallel_backend=self.data_parallel_backend,
+ data_parallel_hybrid_lb=self.data_parallel_hybrid_lb,
+ enable_expert_parallel=self.enable_expert_parallel,
+ enable_eplb=self.enable_eplb,
+ eplb_config=self.eplb_config,
+ max_parallel_loading_workers=self.max_parallel_loading_workers,
+ disable_custom_all_reduce=self.disable_custom_all_reduce,
+ ray_workers_use_nsight=self.ray_workers_use_nsight,
+ ray_runtime_env=ray_runtime_env,
+ placement_group=placement_group,
+ distributed_executor_backend=self.distributed_executor_backend,
+ worker_cls=self.worker_cls,
+ worker_extension_cls=self.worker_extension_cls,
+ decode_context_parallel_size=self.decode_context_parallel_size,
+ )
+
+ speculative_config = self.create_speculative_config(
+ target_model_config=model_config,
+ target_parallel_config=parallel_config,
+ enable_chunked_prefill=self.enable_chunked_prefill,
+ disable_log_stats=self.disable_log_stats,
+ )
+
+ # Make sure num_lookahead_slots is set appropriately depending on
+ # whether speculative decoding is enabled
+ num_lookahead_slots = self.num_lookahead_slots
+ if speculative_config is not None:
+ num_lookahead_slots = speculative_config.num_lookahead_slots
+
+ scheduler_config = SchedulerConfig(
+ runner_type=model_config.runner_type,
+ max_num_batched_tokens=self.max_num_batched_tokens,
+ max_num_seqs=self.max_num_seqs,
+ max_model_len=model_config.max_model_len,
+ cuda_graph_sizes=self.cuda_graph_sizes,
+ num_lookahead_slots=num_lookahead_slots,
+ delay_factor=self.scheduler_delay_factor,
+ enable_chunked_prefill=self.enable_chunked_prefill,
+ disable_chunked_mm_input=self.disable_chunked_mm_input,
+ is_multimodal_model=model_config.is_multimodal_model,
+ preemption_mode=self.preemption_mode,
+ send_delta_data=(envs.VLLM_USE_RAY_SPMD_WORKER
+ and parallel_config.use_ray),
+ policy=self.scheduling_policy,
+ scheduler_cls=self.scheduler_cls,
+ max_num_partial_prefills=self.max_num_partial_prefills,
+ max_long_partial_prefills=self.max_long_partial_prefills,
+ long_prefill_token_threshold=self.long_prefill_token_threshold,
+ disable_hybrid_kv_cache_manager=self.
+ disable_hybrid_kv_cache_manager,
+ async_scheduling=self.async_scheduling,
+ )
+
+ if not model_config.is_multimodal_model and self.default_mm_loras:
+ raise ValueError(
+ "Default modality-specific LoRA(s) were provided for a "
+ "non-multimodal model")
+
+ lora_config = LoRAConfig(
+ bias_enabled=self.enable_lora_bias,
+ max_lora_rank=self.max_lora_rank,
+ max_loras=self.max_loras,
+ default_mm_loras=self.default_mm_loras,
+ fully_sharded_loras=self.fully_sharded_loras,
+ lora_extra_vocab_size=self.lora_extra_vocab_size,
+ lora_dtype=self.lora_dtype,
+ max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
+ and self.max_cpu_loras > 0 else None) if self.enable_lora else None
+
+ # bitsandbytes pre-quantized models need a specific model loader
+ if model_config.quantization == "bitsandbytes":
+ self.quantization = self.load_format = "bitsandbytes"
+
+ load_config = self.create_load_config()
+
+ decoding_config = DecodingConfig(
+ backend=self.guided_decoding_backend,
+ disable_fallback=self.guided_decoding_disable_fallback,
+ disable_any_whitespace=self.guided_decoding_disable_any_whitespace,
+ disable_additional_properties=\
+ self.guided_decoding_disable_additional_properties,
+ reasoning_backend=self.reasoning_parser
+ )
+
+ observability_config = ObservabilityConfig(
+ show_hidden_metrics_for_version=(
+ self.show_hidden_metrics_for_version),
+ otlp_traces_endpoint=self.otlp_traces_endpoint,
+ collect_detailed_traces=self.collect_detailed_traces,
+ )
+
+ config = VllmConfig(
+ model_config=model_config,
+ cache_config=cache_config,
+ parallel_config=parallel_config,
+ scheduler_config=scheduler_config,
+ device_config=device_config,
+ lora_config=lora_config,
+ speculative_config=speculative_config,
+ load_config=load_config,
+ decoding_config=decoding_config,
+ observability_config=observability_config,
+ compilation_config=self.compilation_config,
+ kv_transfer_config=self.kv_transfer_config,
+ kv_events_config=self.kv_events_config,
+ additional_config=self.additional_config,
+ )
+
+ return config
+
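For orientation, the method above is what runs when user code turns CLI-style arguments into a full VllmConfig. The following is a hedged usage sketch: the exact EngineArgs fields vary by version, and building the config requires the model and the current platform to be resolvable.

    from vllm.engine.arg_utils import EngineArgs

    engine_args = EngineArgs(
        model="facebook/opt-125m",  # any locally reachable model id
        tensor_parallel_size=1,
        max_num_seqs=64,
    )
    vllm_config = engine_args.create_engine_config()
    print(type(vllm_config).__name__)                 # VllmConfig
    print(vllm_config.scheduler_config.max_num_seqs)  # 64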
+ def _is_v1_supported_oracle(self, model_config: ModelConfig) -> bool:
+ """Oracle for whether to use V0 or V1 Engine by default."""
+
+ #############################################################
+ # Unsupported Feature Flags on V1.
+
+ if self.load_format == "sharded_state":
+ _raise_or_fallback(
+ feature_name=f"--load_format {self.load_format}",
+ recommend_to_remove=False)
+ return False
+
+ if (self.logits_processor_pattern
+ != EngineArgs.logits_processor_pattern):
+ _raise_or_fallback(feature_name="--logits-processor-pattern",
+ recommend_to_remove=False)
+ return False
+
+ if self.preemption_mode != SchedulerConfig.preemption_mode:
+ _raise_or_fallback(feature_name="--preemption-mode",
+ recommend_to_remove=True)
+ return False
+
+ if (self.disable_async_output_proc
+ != EngineArgs.disable_async_output_proc):
+ _raise_or_fallback(feature_name="--disable-async-output-proc",
+ recommend_to_remove=True)
+ return False
+
+ if self.scheduler_delay_factor != SchedulerConfig.delay_factor:
+ _raise_or_fallback(feature_name="--scheduler-delay-factor",
+ recommend_to_remove=True)
+ return False
+
+ if self.kv_cache_dtype != "auto":
+ supported = current_platform.is_kv_cache_dtype_supported(
+ self.kv_cache_dtype, model_config)
+ if not supported:
+ _raise_or_fallback(feature_name="--kv-cache-dtype",
+ recommend_to_remove=False)
+ return False
+
+ # No text embedding inputs so far.
+ if self.enable_prompt_embeds:
+ _raise_or_fallback(feature_name="--enable-prompt-embeds",
+ recommend_to_remove=False)
+ return False
+
+ # No Mamba or Encoder-Decoder so far.
+ if not model_config.is_v1_compatible:
+ _raise_or_fallback(feature_name=model_config.architectures,
+ recommend_to_remove=False)
+ return False
+
+ # No Concurrent Partial Prefills so far.
+ if (self.max_num_partial_prefills
+ != SchedulerConfig.max_num_partial_prefills
+ or self.max_long_partial_prefills
+ != SchedulerConfig.max_long_partial_prefills):
+ _raise_or_fallback(feature_name="Concurrent Partial Prefill",
+ recommend_to_remove=False)
+ return False
+
+ # V1 supports N-gram, Medusa, and Eagle speculative decoding.
+ if (self.speculative_config is not None
+ and self.speculative_config.get("method") == "draft_model"):
+ raise NotImplementedError(
+ "Speculative decoding with draft model is not supported yet. "
+ "Please consider using other speculative decoding methods "
+ "such as ngram, medusa, eagle, or deepseek_mtp.")
+
+ V1_BACKENDS = [
+ "FLASH_ATTN_VLLM_V1",
+ "FLASH_ATTN",
+ "PALLAS",
+ "PALLAS_VLLM_V1",
+ "TRITON_ATTN_VLLM_V1",
+ "TRITON_MLA",
+ "CUTLASS_MLA",
+ "FLASHMLA",
+ "FLASHMLA_VLLM_V1",
+ "FLASH_ATTN_MLA",
+ "FLASHINFER",
+ "FLASHINFER_VLLM_V1",
+ "FLASHINFER_MLA",
+ "ROCM_AITER_MLA",
+ "TORCH_SDPA_VLLM_V1",
+ "FLEX_ATTENTION",
+ "TREE_ATTN",
+ "XFORMERS_VLLM_V1",
+ ]
+ if (envs.is_set("VLLM_ATTENTION_BACKEND")
+ and envs.VLLM_ATTENTION_BACKEND not in V1_BACKENDS):
+ name = f"VLLM_ATTENTION_BACKEND={envs.VLLM_ATTENTION_BACKEND}"
+ _raise_or_fallback(feature_name=name, recommend_to_remove=True)
+ return False
+
+ # Platforms must decide if they can support v1 for this model
+ if not current_platform.supports_v1(model_config=model_config):
+ _raise_or_fallback(
+ feature_name=f"device type={current_platform.device_type}",
+ recommend_to_remove=False)
+ return False
+ #############################################################
+ # Experimental Features - allow users to opt in.
+
+ if self.pipeline_parallel_size > 1:
+ supports_pp = getattr(self.distributed_executor_backend,
+ 'supports_pp', False)
+ if not supports_pp and self.distributed_executor_backend not in (
+ ParallelConfig.distributed_executor_backend, "ray", "mp",
+ "external_launcher"):
+ name = "Pipeline Parallelism without Ray distributed " \
+ "executor or multiprocessing executor or external " \
+ "launcher"
+ _raise_or_fallback(feature_name=name,
+ recommend_to_remove=False)
+ return False
+
+ # The platform may be supported on V1, but off by default for now.
+ if not current_platform.default_v1( # noqa: SIM103
+ model_config=model_config) and _warn_or_fallback(
+ current_platform.device_name):
+ return False
+
+ if (current_platform.is_cpu()
+ and model_config.get_sliding_window() is not None):
+ _raise_or_fallback(feature_name="sliding window (CPU backend)",
+ recommend_to_remove=False)
+ return False
+
+ #############################################################
+
+ return True
+
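The oracle's interaction with VLLM_USE_V1 can be condensed into a small sketch. This is a hypothetical restatement of the branches above, not a helper that exists in vLLM:

    import os

    def expected_engine(flag_supported_on_v1: bool) -> str:
        # What one unsupported-feature check above amounts to.
        forced_v1 = os.environ.get("VLLM_USE_V1") == "1"
        if flag_supported_on_v1:
            return "V1"
        if forced_v1:
            # _raise_or_fallback raises in this case rather than silently
            # ignoring an explicit V1 request.
            raise NotImplementedError("feature not supported with VLLM_USE_V1=1")
        return "V0"  # fall back with a warning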
+ def _set_default_args_v0(self, model_config: ModelConfig) -> None:
+ """Set Default Arguments for V0 Engine."""
+
+ max_model_len = model_config.max_model_len
+ use_long_context = max_model_len > 32768
+ if self.enable_chunked_prefill is None:
+ # Chunked prefill not supported for Multimodal or MLA in V0.
+ if model_config.is_multimodal_model or model_config.use_mla:
+ self.enable_chunked_prefill = False
+
+ # Enable chunked prefill by default for long context (> 32K)
+ # models to avoid OOM errors in initial memory profiling phase.
+ elif use_long_context:
+ is_gpu = current_platform.is_cuda()
+ use_sliding_window = (model_config.get_sliding_window()
+ is not None)
+ use_spec_decode = self.speculative_config is not None
+
+ if (is_gpu and not use_sliding_window and not use_spec_decode
+ and not self.enable_lora):
+ self.enable_chunked_prefill = True
+ logger.warning(
+ "Chunked prefill is enabled by default for models "
+ "with max_model_len > 32K. Chunked prefill might "
+ "not work with some features or models. If you "
+ "encounter any issues, please disable by launching "
+ "with --enable-chunked-prefill=False.")
+
+ if self.enable_chunked_prefill is None:
+ self.enable_chunked_prefill = False
+
+ if not self.enable_chunked_prefill and use_long_context:
+ logger.warning(
+ "The model has a long context length (%s). This may cause "
+ "OOM during the initial memory profiling phase, or result "
+ "in low performance due to small KV cache size. Consider "
+ "setting --max-model-len to a smaller value.", max_model_len)
+
+ # Disable prefix caching for multimodal models for VLLM_V0.
+ if self.enable_prefix_caching and model_config.is_multimodal_model:
+ logger.warning(
+ "--enable-prefix-caching is not supported for multimodal "
+ "models in V0 and has been disabled.")
+ self.enable_prefix_caching = False
+
+ # Set max_num_seqs to 256 for VLLM_V0.
+ if self.max_num_seqs is None:
+ self.max_num_seqs = 256
+
+ def _set_default_args_v1(self, usage_context: UsageContext,
+ model_config: ModelConfig) -> None:
+ """Set Default Arguments for V1 Engine."""
+
+ # V1 always uses chunked prefills and prefix caching
+ # for non-pooling tasks.
+ # For pooling tasks the default is False.
+ if model_config.runner_type != "pooling":
+ self.enable_chunked_prefill = True
+ if self.enable_prefix_caching is None:
+ self.enable_prefix_caching = True
+ else:
+
+ pooling_type = model_config.pooler_config.pooling_type
+ is_causal = getattr(model_config.hf_config, "is_causal", True)
+ incremental_prefill_supported = (pooling_type is not None
+ and pooling_type.lower() == "last"
+ and is_causal)
+
+ action = "Enabling" if \
+ incremental_prefill_supported else "Disabling"
+
+ if self.enable_chunked_prefill is None:
+ self.enable_chunked_prefill = incremental_prefill_supported
+ logger.info("(%s) chunked prefill by default", action)
+ if self.enable_prefix_caching is None:
+ self.enable_prefix_caching = incremental_prefill_supported
+ logger.info("(%s) prefix caching by default", action)
+
+ # V1 should use the new scheduler by default.
+ # Swap it only if this arg is set to the original V0 default
+ if self.scheduler_cls == EngineArgs.scheduler_cls:
+ self.scheduler_cls = "vllm.v1.core.sched.scheduler.Scheduler"
+
+ # When no user override, set the default values based on the usage
+ # context.
+ # Use different default values for different hardware.
+
+ # Try to query the device name on the current platform. If it fails,
+ # it may be because the platform that imports vLLM is not the same
+ # as the platform that vLLM is running on (e.g. the case of scaling
+ # vLLM with Ray) and has no GPUs. In this case we use the default
+ # values for non-H100/H200 GPUs.
+ try:
+ device_memory = current_platform.get_device_total_memory()
+ device_name = current_platform.get_device_name().lower()
+ except Exception:
+ # This is only used to set default_max_num_batched_tokens
+ device_memory = 0
+
+ # NOTE(Kuntai): Setting large `max_num_batched_tokens` for A100 reduces
+ # throughput, see PR #17885 for more details.
+ # So here we do an extra device name check to prevent such regression.
+ from vllm.usage.usage_lib import UsageContext
+ if device_memory >= 70 * GiB_bytes and "a100" not in device_name:
+ # For GPUs like H100 and MI300x, use larger default values.
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 16384,
+ UsageContext.OPENAI_API_SERVER: 8192,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 1024,
+ UsageContext.OPENAI_API_SERVER: 1024,
+ }
+ else:
+ # TODO(woosuk): Tune the default values for other hardware.
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 8192,
+ UsageContext.OPENAI_API_SERVER: 2048,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 256,
+ UsageContext.OPENAI_API_SERVER: 256,
+ }
+
+ # tpu specific default values.
+ if current_platform.is_tpu():
+ default_max_num_batched_tokens_tpu = {
+ UsageContext.LLM_CLASS: {
+ 'V6E': 2048,
+ 'V5E': 1024,
+ 'V5P': 512,
+ },
+ UsageContext.OPENAI_API_SERVER: {
+ 'V6E': 1024,
+ 'V5E': 512,
+ 'V5P': 256,
+ }
+ }
+
+ # cpu specific default values.
+ if current_platform.is_cpu():
+ world_size = self.pipeline_parallel_size * self.tensor_parallel_size
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 4096 * world_size,
+ UsageContext.OPENAI_API_SERVER: 2048 * world_size,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 256 * world_size,
+ UsageContext.OPENAI_API_SERVER: 128 * world_size,
+ }
+
+ use_context_value = usage_context.value if usage_context else None
+ if (self.max_num_batched_tokens is None
+ and usage_context in default_max_num_batched_tokens):
+ if current_platform.is_tpu():
+ chip_name = current_platform.get_device_name()
+ if chip_name in default_max_num_batched_tokens_tpu[
+ usage_context]:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens_tpu[
+ usage_context][chip_name]
+ else:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens[usage_context]
+ else:
+ if not self.enable_chunked_prefill:
+ self.max_num_batched_tokens = model_config.max_model_len
+ else:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens[usage_context]
+ logger.debug(
+ "Setting max_num_batched_tokens to %d for %s usage context.",
+ self.max_num_batched_tokens, use_context_value)
+
+ if (self.max_num_seqs is None
+ and usage_context in default_max_num_seqs):
+ self.max_num_seqs = min(default_max_num_seqs[usage_context],
+ self.max_num_batched_tokens or sys.maxsize)
+
+ logger.debug("Setting max_num_seqs to %d for %s usage context.",
+ self.max_num_seqs, use_context_value)
+
+
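As a quick worked example of the default tables above, here is a hedged sketch that keeps only the selection arithmetic (the real code also handles TPU chip names and the CPU world-size scaling):

    def default_tokens(device_memory_gib: float, device_name: str,
                       serving_api: bool) -> int:
        # Pick max_num_batched_tokens the same way the branches above do.
        big_gpu = device_memory_gib >= 70 and "a100" not in device_name.lower()
        if big_gpu:  # e.g. H100, H200, MI300x
            return 8192 if serving_api else 16384
        return 2048 if serving_api else 8192

    assert default_tokens(80, "NVIDIA H100 80GB HBM3", serving_api=True) == 8192
    assert default_tokens(80, "NVIDIA A100-SXM4-80GB", serving_api=True) == 2048
    assert default_tokens(24, "NVIDIA L4", serving_api=False) == 8192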
+ @dataclass
+ class AsyncEngineArgs(EngineArgs):
+ """Arguments for asynchronous vLLM engine."""
+ enable_log_requests: bool = False
+
+ @property
+ @deprecated(
+ "`disable_log_requests` is deprecated and has been replaced with "
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
+ "`enable_log_requests` instead.")
+ def disable_log_requests(self) -> bool:
+ return not self.enable_log_requests
+
+ @disable_log_requests.setter
+ @deprecated(
+ "`disable_log_requests` is deprecated and has been replaced with "
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
+ "`enable_log_requests` instead.")
+ def disable_log_requests(self, value: bool):
+ self.enable_log_requests = not value
+
+ @staticmethod
+ def add_cli_args(parser: FlexibleArgumentParser,
+ async_args_only: bool = False) -> FlexibleArgumentParser:
+ # Initialize plugins to update the parser; for example, a plugin may
+ # add a new kind of quantization method to --quantization argument or
+ # a new device to --device argument.
+ load_general_plugins()
+ if not async_args_only:
+ parser = EngineArgs.add_cli_args(parser)
+ parser.add_argument('--enable-log-requests',
+ action=argparse.BooleanOptionalAction,
+ default=AsyncEngineArgs.enable_log_requests,
+ help='Enable logging requests.')
+ parser.add_argument('--disable-log-requests',
+ action=argparse.BooleanOptionalAction,
+ default=not AsyncEngineArgs.enable_log_requests,
+ help='[DEPRECATED] Disable logging requests.',
+ deprecated=True)
+ current_platform.pre_register_and_update(parser)
+ return parser
+
+
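The deprecated property keeps old disable_log_requests call sites working while steering users to the new flag. A small sketch of the mapping follows; whether a DeprecationWarning is emitted at access time depends on the `deprecated` decorator implementation in use:

    args = AsyncEngineArgs(model="facebook/opt-125m")

    args.enable_log_requests           # False (new field, default)
    args.disable_log_requests          # True, via the deprecated property
    args.disable_log_requests = False  # equivalent to enable_log_requests = True
    args.enable_log_requests           # True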
+ def _raise_or_fallback(feature_name: str, recommend_to_remove: bool):
+ if envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1:
+ raise NotImplementedError(
+ f"VLLM_USE_V1=1 is not supported with {feature_name}.")
+ msg = f"{feature_name} is not supported by the V1 Engine. "
+ msg += "Falling back to V0. "
+ if recommend_to_remove:
+ msg += f"We recommend removing {feature_name} from your config "
+ msg += "in favor of the V1 Engine."
+ logger.warning(msg)
+
+
+ def _warn_or_fallback(feature_name: str) -> bool:
+ if envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1:
+ logger.warning(
+ "Detected VLLM_USE_V1=1 with %s. Usage should "
+ "be considered experimental. Please report any "
+ "issues on GitHub.", feature_name)
+ should_exit = False
+ else:
+ logger.info(
+ "%s is experimental on VLLM_USE_V1=1. "
+ "Falling back to V0 Engine.", feature_name)
+ should_exit = True
+ return should_exit
+
+
+ def human_readable_int(value):
+ """Parse human-readable integers like '1k', '2M', etc.
+ Including decimal values with decimal multipliers.
+
+ Examples:
+ - '1k' -> 1,000
+ - '1K' -> 1,024
+ - '25.6k' -> 25,600
+ """
+ value = value.strip()
+ match = re.fullmatch(r'(\d+(?:\.\d+)?)([kKmMgGtT])', value)
+ if match:
+ decimal_multiplier = {
+ 'k': 10**3,
+ 'm': 10**6,
+ 'g': 10**9,
+ 't': 10**12,
+ }
+ binary_multiplier = {
+ 'K': 2**10,
+ 'M': 2**20,
+ 'G': 2**30,
+ 'T': 2**40,
+ }
+
+ number, suffix = match.groups()
+ if suffix in decimal_multiplier:
+ mult = decimal_multiplier[suffix]
+ return int(float(number) * mult)
+ elif suffix in binary_multiplier:
+ mult = binary_multiplier[suffix]
+ # Do not allow decimals with binary multipliers
+ try:
+ return int(number) * mult
+ except ValueError as e:
+ raise argparse.ArgumentTypeError("Decimals are not allowed " \
+ f"with binary suffixes like {suffix}. Did you mean to use " \
+ f"{number}{suffix.lower()} instead?") from e
+
+ # Regular plain number.
+ return int(value)
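Usage of the converter matches the docstring; the results below follow directly from the code above (which CLI flags are wired to this converter varies by vLLM version, so that part is not shown):

    assert human_readable_int("2048") == 2048
    assert human_readable_int("1k") == 1000      # decimal suffix
    assert human_readable_int("1K") == 1024      # binary suffix
    assert human_readable_int("25.6k") == 25600
    # human_readable_int("1.5K") raises argparse.ArgumentTypeError:
    # decimals are not allowed with binary suffixes.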