vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
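The listing below was produced by the registry's diff viewer. As a minimal sketch (not part of the diff itself), a comparable file listing can be generated locally with Python's standard zipfile module, assuming the wheel named in this page's header has already been downloaded (e.g. via pip download):

    # Sketch: list the files inside a downloaded wheel (a wheel is a zip archive).
    # The filename is taken from this page's header and is assumed to exist locally.
    from zipfile import ZipFile

    WHEEL = "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

    with ZipFile(WHEEL) as wheel:
        for info in wheel.infolist():
            # Print each archived path with its uncompressed size in bytes.
            print(f"{info.filename} ({info.file_size} bytes)")

A registry diff like the one below is essentially this listing computed for two package versions and compared entry by entry; here the previous version is empty, so every file appears as added lines only.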
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1778 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ # yapf: disable
+ import argparse
+ import copy
+ import dataclasses
+ import functools
+ import json
+ import sys
+ from dataclasses import MISSING, dataclass, fields, is_dataclass
+ from itertools import permutations
+ from typing import (TYPE_CHECKING, Annotated, Any, Callable, Dict, List,
+                     Literal, Optional, Type, TypeVar, Union, cast, get_args,
+                     get_origin)
+
+ import huggingface_hub
+ import regex as re
+ import torch
+ from pydantic import TypeAdapter, ValidationError
+ from typing_extensions import TypeIs, deprecated
+
+ import vllm.envs as envs
+ from vllm.config import (BlockSize, CacheConfig, CacheDType, CompilationConfig,
+                          ConfigType, ConvertOption, DetailedTraceModules,
+                          Device, DeviceConfig, DistributedExecutorBackend,
+                          EPLBConfig, HfOverrides, KVEventsConfig,
+                          KVTransferConfig, LoadConfig, LogprobsMode,
+                          LoRAConfig, MambaDType, MMEncoderTPMode, ModelConfig,
+                          ModelDType, ObservabilityConfig, ParallelConfig,
+                          PoolerConfig, PrefixCachingHashAlgo, RunnerOption,
+                          SchedulerConfig, SchedulerPolicy, SpeculativeConfig,
+                          StructuredOutputsConfig, TaskOption, TokenizerMode,
+                          VllmConfig, get_attr_docs)
+ from vllm.config.multimodal import MMCacheType, MultiModalConfig
+ from vllm.config.parallel import ExpertPlacementStrategy
+ from vllm.config.utils import get_field
+ from vllm.logger import init_logger
+ from vllm.platforms import CpuArchEnum, current_platform
+ from vllm.plugins import load_general_plugins
+ from vllm.ray.lazy_utils import is_ray_initialized
+ from vllm.reasoning import ReasoningParserManager
+ from vllm.test_utils import MODEL_WEIGHTS_S3_BUCKET, MODELS_ON_S3
+ from vllm.transformers_utils.config import (get_model_path, is_interleaved,
+                                             maybe_override_with_speculators)
+ from vllm.transformers_utils.utils import check_gguf_file
+ from vllm.utils import (FlexibleArgumentParser, GiB_bytes, get_ip,
+                         is_in_ray_actor)
+ from vllm.v1.sample.logits_processor import LogitsProcessor
+
+ # yapf: enable
+
+ if TYPE_CHECKING:
+     from vllm.executor.executor_base import ExecutorBase
+     from vllm.model_executor.layers.quantization import QuantizationMethods
+     from vllm.model_executor.model_loader import LoadFormats
+     from vllm.usage.usage_lib import UsageContext
+ else:
+     ExecutorBase = Any
+     QuantizationMethods = Any
+     LoadFormats = Any
+     UsageContext = Any
+
+ logger = init_logger(__name__)
+
+ # object is used to allow for special typing forms
+ T = TypeVar("T")
+ TypeHint = Union[type[Any], object]
+ TypeHintT = Union[type[T], object]
+
+
+ def parse_type(return_type: Callable[[str], T]) -> Callable[[str], T]:
+
+     def _parse_type(val: str) -> T:
+         try:
+             return return_type(val)
+         except ValueError as e:
+             raise argparse.ArgumentTypeError(
+                 f"Value {val} cannot be converted to {return_type}.") from e
+
+     return _parse_type
+
+
+ def optional_type(
+         return_type: Callable[[str], T]) -> Callable[[str], Optional[T]]:
+
+     def _optional_type(val: str) -> Optional[T]:
+         if val == "" or val == "None":
+             return None
+         return parse_type(return_type)(val)
+
+     return _optional_type
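# Illustrative usage (hypothetical values, not part of the diff): optional_type
# wraps a converter so that "" or "None" on the CLI yields None, while
# parse_type turns ValueError into argparse.ArgumentTypeError.
#
#     parse_int = optional_type(int)
#     parse_int("8")       # -> 8
#     parse_int("None")    # -> None
#     parse_int("eight")   # raises argparse.ArgumentTypeError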
+
+
+ def union_dict_and_str(val: str) -> Optional[Union[str, dict[str, str]]]:
+     if not re.match(r"(?s)^\s*{.*}\s*$", val):
+         return str(val)
+     return optional_type(json.loads)(val)
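# Illustrative behavior (hypothetical inputs): values that look like a JSON
# object are parsed into a dict; anything else is kept as a plain string.
#
#     union_dict_and_str('{"image": "pillow"}')   # -> {'image': 'pillow'}
#     union_dict_and_str('facebook/opt-125m')     # -> 'facebook/opt-125m'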
+
+
+ def is_type(type_hint: TypeHint, type: TypeHintT) -> TypeIs[TypeHintT]:
+     """Check if the type hint is a specific type."""
+     return type_hint is type or get_origin(type_hint) is type
+
+
+ def contains_type(type_hints: set[TypeHint], type: TypeHintT) -> bool:
+     """Check if the type hints contain a specific type."""
+     return any(is_type(type_hint, type) for type_hint in type_hints)
+
+
+ def get_type(type_hints: set[TypeHint], type: TypeHintT) -> TypeHintT:
+     """Get the specific type from the type hints."""
+     return next((th for th in type_hints if is_type(th, type)), None)
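# Illustrative usage (hypothetical hints): these helpers match both bare types
# and parameterized generics via get_origin().
#
#     hints = {int, list[str], Literal["a", "b"]}
#     contains_type(hints, list)   # True, since list[str] has origin list
#     get_type(hints, Literal)     # Literal["a", "b"]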
+
+
+ def literal_to_kwargs(type_hints: set[TypeHint]) -> dict[str, Any]:
+     """Get the `type` and `choices` from a `Literal` type hint in `type_hints`.
+
+     If `type_hints` also contains `str`, we use `metavar` instead of `choices`.
+     """
+     type_hint = get_type(type_hints, Literal)
+     options = get_args(type_hint)
+     option_type = type(options[0])
+     if not all(isinstance(option, option_type) for option in options):
+         raise ValueError(
+             "All options must be of the same type. "
+             f"Got {options} with types {[type(c) for c in options]}")
+     kwarg = "metavar" if contains_type(type_hints, str) else "choices"
+     return {"type": option_type, kwarg: sorted(options)}
+
+
+ def is_not_builtin(type_hint: TypeHint) -> bool:
+     """Check if the class is not a built-in type."""
+     return type_hint.__module__ != "builtins"
+
+
+ def get_type_hints(type_hint: TypeHint) -> set[TypeHint]:
+     """Extract type hints from Annotated or Union type hints."""
+     type_hints: set[TypeHint] = set()
+     origin = get_origin(type_hint)
+     args = get_args(type_hint)
+
+     if origin is Annotated:
+         type_hints.update(get_type_hints(args[0]))
+     elif origin is Union:
+         for arg in args:
+             type_hints.update(get_type_hints(arg))
+     else:
+         type_hints.add(type_hint)
+
+     return type_hints
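# Illustrative expansion (hypothetical annotation): nested Annotated/Union
# wrappers are flattened into the set of leaf hints.
#
#     get_type_hints(Optional[Annotated[int, "doc"]])
#     # -> {int, NoneType}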
+
+
+ def is_online_quantization(quantization: Any) -> bool:
+     return quantization in ["inc"]
+
+
+ NEEDS_HELP = (
+     any("--help" in arg for arg in sys.argv)  # vllm SUBCOMMAND --help
+     or (argv0 := sys.argv[0]).endswith("mkdocs")  # mkdocs SUBCOMMAND
+     or argv0.endswith("mkdocs/__main__.py")  # python -m mkdocs SUBCOMMAND
+ )
+
+
+ @functools.lru_cache(maxsize=30)
+ def _compute_kwargs(cls: ConfigType) -> dict[str, Any]:
+     # Save time by only fetching attribute docs when generating help text
+     cls_docs = get_attr_docs(cls) if NEEDS_HELP else {}
+     kwargs = {}
+     for field in fields(cls):
+         # Get the set of possible types for the field
+         type_hints: set[TypeHint] = get_type_hints(field.type)
+
+         # If the field is a dataclass, we can parse it from a JSON string
+         generator = (th for th in type_hints if is_dataclass(th))
+         dataclass_cls = next(generator, None)
+
+         # Get the default value of the field
+         if field.default is not MISSING:
+             default = field.default
+         elif field.default_factory is not MISSING:
+             default = field.default_factory()
+
+         # Get the help text for the field
+         name = field.name
+         help = cls_docs.get(name, "").strip()
+         # Escape % for argparse
+         help = help.replace("%", "%%")
+
+         # Initialise the kwargs dictionary for the field
+         kwargs[name] = {"default": default, "help": help}
+
+         # Set other kwargs based on the type hints
+         json_tip = ("Should either be a valid JSON string or JSON keys passed "
+                     "individually.")
+         if dataclass_cls is not None:
+
+             def parse_dataclass(val: str, cls=dataclass_cls) -> Any:
+                 try:
+                     return TypeAdapter(cls).validate_json(val)
+                 except ValidationError as e:
+                     raise argparse.ArgumentTypeError(repr(e)) from e
+
+             kwargs[name]["type"] = parse_dataclass
+             kwargs[name]["help"] += f"\n\n{json_tip}"
+         elif contains_type(type_hints, bool):
+             # Creates --no-<name> and --<name> flags
+             kwargs[name]["action"] = argparse.BooleanOptionalAction
+         elif contains_type(type_hints, Literal):
+             kwargs[name].update(literal_to_kwargs(type_hints))
+         elif contains_type(type_hints, tuple):
+             type_hint = get_type(type_hints, tuple)
+             types = get_args(type_hint)
+             tuple_type = types[0]
+             assert all(t is tuple_type for t in types if t is not Ellipsis), (
+                 "All non-Ellipsis tuple elements must be of the same "
+                 f"type. Got {types}.")
+             kwargs[name]["type"] = tuple_type
+             kwargs[name]["nargs"] = "+" if Ellipsis in types else len(types)
+         elif contains_type(type_hints, list):
+             type_hint = get_type(type_hints, list)
+             types = get_args(type_hint)
+             list_type = types[0]
+             if get_origin(list_type) is Union:
+                 msg = "List type must contain str if it is a Union."
+                 assert str in get_args(list_type), msg
+                 list_type = str
+             kwargs[name]["type"] = list_type
+             kwargs[name]["nargs"] = "+"
+         elif contains_type(type_hints, int):
+             kwargs[name]["type"] = int
+             # Special case for large integers
+             human_readable_ints = {
+                 "max_model_len",
+                 "max_num_batched_tokens",
+                 "kv_cache_memory_bytes",
+             }
+             if name in human_readable_ints:
+                 kwargs[name]["type"] = human_readable_int
+                 kwargs[name]["help"] += f"\n\n{human_readable_int.__doc__}"
+         elif contains_type(type_hints, float):
+             kwargs[name]["type"] = float
+         elif (contains_type(type_hints, dict)
+               and (contains_type(type_hints, str)
+                    or any(is_not_builtin(th) for th in type_hints))):
+             kwargs[name]["type"] = union_dict_and_str
+         elif contains_type(type_hints, dict):
+             kwargs[name]["type"] = parse_type(json.loads)
+             kwargs[name]["help"] += f"\n\n{json_tip}"
+         elif (contains_type(type_hints, str)
+               or any(is_not_builtin(th) for th in type_hints)):
+             kwargs[name]["type"] = str
+         else:
+             raise ValueError(
+                 f"Unsupported type {type_hints} for argument {name}.")
+
+         # If the type hint was a sequence of literals, use the helper function
+         # to update the type and choices
+         if get_origin(kwargs[name].get("type")) is Literal:
+             kwargs[name].update(literal_to_kwargs({kwargs[name]["type"]}))
+
+         # If None is in type_hints, make the argument optional.
+         # But not if it's a bool, argparse will handle this better.
+         if type(None) in type_hints and not contains_type(type_hints, bool):
+             kwargs[name]["type"] = optional_type(kwargs[name]["type"])
+             if kwargs[name].get("choices"):
+                 kwargs[name]["choices"].append("None")
+     return kwargs
+
+
+ def get_kwargs(cls: ConfigType) -> dict[str, Any]:
+     """Return argparse kwargs for the given Config dataclass.
+
+     If `--help` or `mkdocs` is not present in the command line, the
+     attribute documentation will not be included in the help output.
+
+     The heavy computation is cached via functools.lru_cache, and a deep copy
+     is returned so callers can mutate the dictionary without affecting the
+     cached version.
+     """
+     return copy.deepcopy(_compute_kwargs(cls))
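# Illustrative wiring (hypothetical snippet): the get_kwargs() output is meant
# to be splatted straight into argparse.
#
#     parser = argparse.ArgumentParser()
#     kw = get_kwargs(CacheConfig)
#     parser.add_argument("--block-size", **kw["block_size"])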
+
+
+ @dataclass
+ class EngineArgs:
+     """Arguments for vLLM engine."""
+     model: str = ModelConfig.model
+     served_model_name: Optional[Union[
+         str, List[str]]] = ModelConfig.served_model_name
+     tokenizer: Optional[str] = ModelConfig.tokenizer
+     hf_config_path: Optional[str] = ModelConfig.hf_config_path
+     runner: RunnerOption = ModelConfig.runner
+     convert: ConvertOption = ModelConfig.convert
+     task: Optional[TaskOption] = ModelConfig.task
+     skip_tokenizer_init: bool = ModelConfig.skip_tokenizer_init
+     enable_prompt_embeds: bool = ModelConfig.enable_prompt_embeds
+     tokenizer_mode: TokenizerMode = ModelConfig.tokenizer_mode
+     trust_remote_code: bool = ModelConfig.trust_remote_code
+     allowed_local_media_path: str = ModelConfig.allowed_local_media_path
+     allowed_media_domains: Optional[
+         list[str]] = ModelConfig.allowed_media_domains
+     download_dir: Optional[str] = LoadConfig.download_dir
+     safetensors_load_strategy: str = LoadConfig.safetensors_load_strategy
+     load_format: Union[str, LoadFormats] = LoadConfig.load_format
+     config_format: str = ModelConfig.config_format
+     dtype: ModelDType = ModelConfig.dtype
+     kv_cache_dtype: CacheDType = CacheConfig.cache_dtype
+     seed: Optional[int] = ModelConfig.seed
+     max_model_len: Optional[int] = ModelConfig.max_model_len
+     cuda_graph_sizes: list[int] = get_field(SchedulerConfig,
+                                             "cuda_graph_sizes")
+     # Note: Specifying a custom executor backend by passing a class
+     # is intended for expert use only. The API may change without
+     # notice.
+     distributed_executor_backend: Optional[Union[
+         str, DistributedExecutorBackend,
+         Type[ExecutorBase]]] = ParallelConfig.distributed_executor_backend
+     # number of P/D disaggregation (or other disaggregation) workers
+     pipeline_parallel_size: int = ParallelConfig.pipeline_parallel_size
+     tensor_parallel_size: int = ParallelConfig.tensor_parallel_size
+     decode_context_parallel_size: int = \
+         ParallelConfig.decode_context_parallel_size
+     data_parallel_size: int = ParallelConfig.data_parallel_size
+     data_parallel_rank: Optional[int] = None
+     data_parallel_start_rank: Optional[int] = None
+     data_parallel_size_local: Optional[int] = None
+     data_parallel_address: Optional[str] = None
+     data_parallel_rpc_port: Optional[int] = None
+     data_parallel_hybrid_lb: bool = False
+     data_parallel_backend: str = ParallelConfig.data_parallel_backend
+     enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel
+     enable_dbo: bool = ParallelConfig.enable_dbo
+     dbo_decode_token_threshold: int = \
+         ParallelConfig.dbo_decode_token_threshold
+     dbo_prefill_token_threshold: int = \
+         ParallelConfig.dbo_prefill_token_threshold
+     eplb_config: EPLBConfig = get_field(ParallelConfig, "eplb_config")
+     enable_eplb: bool = ParallelConfig.enable_eplb
+     expert_placement_strategy: ExpertPlacementStrategy = \
+         ParallelConfig.expert_placement_strategy
+     _api_process_count: int = ParallelConfig._api_process_count
+     _api_process_rank: int = ParallelConfig._api_process_rank
+     num_redundant_experts: int = EPLBConfig.num_redundant_experts
+     eplb_window_size: int = EPLBConfig.window_size
+     eplb_step_interval: int = EPLBConfig.step_interval
+     eplb_log_balancedness: bool = EPLBConfig.log_balancedness
+     max_parallel_loading_workers: Optional[
+         int] = ParallelConfig.max_parallel_loading_workers
+     block_size: Optional[BlockSize] = CacheConfig.block_size
+     enable_prefix_caching: Optional[bool] = CacheConfig.enable_prefix_caching
+     prefix_caching_hash_algo: PrefixCachingHashAlgo = \
+         CacheConfig.prefix_caching_hash_algo
+     disable_sliding_window: bool = ModelConfig.disable_sliding_window
+     disable_cascade_attn: bool = ModelConfig.disable_cascade_attn
+     swap_space: float = CacheConfig.swap_space
+     cpu_offload_gb: float = CacheConfig.cpu_offload_gb
+     gpu_memory_utilization: float = CacheConfig.gpu_memory_utilization
+     kv_cache_memory_bytes: Optional[int] = CacheConfig.kv_cache_memory_bytes
+     max_num_batched_tokens: Optional[
+         int] = SchedulerConfig.max_num_batched_tokens
+     max_num_partial_prefills: int = SchedulerConfig.max_num_partial_prefills
+     max_long_partial_prefills: int = SchedulerConfig.max_long_partial_prefills
+     long_prefill_token_threshold: int = \
+         SchedulerConfig.long_prefill_token_threshold
+     max_num_seqs: Optional[int] = SchedulerConfig.max_num_seqs
+     max_logprobs: int = ModelConfig.max_logprobs
+     logprobs_mode: LogprobsMode = ModelConfig.logprobs_mode
+     disable_log_stats: bool = False
+     revision: Optional[str] = ModelConfig.revision
+     code_revision: Optional[str] = ModelConfig.code_revision
+     rope_scaling: dict[str, Any] = get_field(ModelConfig, "rope_scaling")
+     rope_theta: Optional[float] = ModelConfig.rope_theta
+     hf_token: Optional[Union[bool, str]] = ModelConfig.hf_token
+     hf_overrides: HfOverrides = get_field(ModelConfig, "hf_overrides")
+     tokenizer_revision: Optional[str] = ModelConfig.tokenizer_revision
+     quantization: Optional[QuantizationMethods] = ModelConfig.quantization
+     enforce_eager: bool = ModelConfig.enforce_eager
+     disable_custom_all_reduce: bool = ParallelConfig.disable_custom_all_reduce
+     limit_mm_per_prompt: dict[str, int] = \
+         get_field(MultiModalConfig, "limit_per_prompt")
+     interleave_mm_strings: bool = MultiModalConfig.interleave_mm_strings
+     media_io_kwargs: dict[str, dict[str,
+                                     Any]] = get_field(MultiModalConfig,
+                                                       "media_io_kwargs")
+     mm_processor_kwargs: Optional[Dict[str, Any]] = \
+         MultiModalConfig.mm_processor_kwargs
+     disable_mm_preprocessor_cache: bool = False  # DEPRECATED
+     mm_processor_cache_gb: float = MultiModalConfig.mm_processor_cache_gb
+     mm_processor_cache_type: Optional[MMCacheType] = \
+         MultiModalConfig.mm_processor_cache_type
+     mm_shm_cache_max_object_size_mb: int = \
+         MultiModalConfig.mm_shm_cache_max_object_size_mb
+     mm_encoder_tp_mode: MMEncoderTPMode = MultiModalConfig.mm_encoder_tp_mode
+     io_processor_plugin: Optional[str] = None
+     skip_mm_profiling: bool = MultiModalConfig.skip_mm_profiling
+     video_pruning_rate: float = MultiModalConfig.video_pruning_rate
+     # LoRA fields
+     enable_lora: bool = False
+     enable_lora_bias: bool = LoRAConfig.bias_enabled
+     max_loras: int = LoRAConfig.max_loras
+     max_lora_rank: int = LoRAConfig.max_lora_rank
+     default_mm_loras: Optional[Dict[str, str]] = \
+         LoRAConfig.default_mm_loras
+     fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
+     max_cpu_loras: Optional[int] = LoRAConfig.max_cpu_loras
+     lora_dtype: Optional[Union[str, torch.dtype]] = LoRAConfig.lora_dtype
+     lora_extra_vocab_size: int = LoRAConfig.lora_extra_vocab_size
+
+     ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
+     num_gpu_blocks_override: Optional[
+         int] = CacheConfig.num_gpu_blocks_override
+     num_lookahead_slots: int = SchedulerConfig.num_lookahead_slots
+     model_loader_extra_config: dict = \
+         get_field(LoadConfig, "model_loader_extra_config")
+     ignore_patterns: Optional[Union[str,
+                                     List[str]]] = LoadConfig.ignore_patterns
+
+     enable_chunked_prefill: Optional[
+         bool] = SchedulerConfig.enable_chunked_prefill
+     disable_chunked_mm_input: bool = SchedulerConfig.disable_chunked_mm_input
+
+     disable_hybrid_kv_cache_manager: bool = (
+         SchedulerConfig.disable_hybrid_kv_cache_manager)
+
+     structured_outputs_config: StructuredOutputsConfig = get_field(
+         VllmConfig, "structured_outputs_config")
+     reasoning_parser: str = StructuredOutputsConfig.reasoning_parser
+     # Deprecated guided decoding fields
+     guided_decoding_backend: Optional[str] = None
+     guided_decoding_disable_fallback: Optional[bool] = None
+     guided_decoding_disable_any_whitespace: Optional[bool] = None
+     guided_decoding_disable_additional_properties: Optional[bool] = None
+
+     logits_processor_pattern: Optional[
+         str] = ModelConfig.logits_processor_pattern
+
+     speculative_config: Optional[Dict[str, Any]] = None
+
+     show_hidden_metrics_for_version: Optional[str] = \
+         ObservabilityConfig.show_hidden_metrics_for_version
+     otlp_traces_endpoint: Optional[str] = \
+         ObservabilityConfig.otlp_traces_endpoint
+     collect_detailed_traces: Optional[list[DetailedTraceModules]] = \
+         ObservabilityConfig.collect_detailed_traces
+     scheduling_policy: SchedulerPolicy = SchedulerConfig.policy
+     scheduler_cls: Union[str, Type[object]] = SchedulerConfig.scheduler_cls
+
+     pooler_config: Optional[PoolerConfig] = ModelConfig.pooler_config
+     override_pooler_config: Optional[Union[dict, PoolerConfig]] = \
+         ModelConfig.override_pooler_config
+     compilation_config: CompilationConfig = \
+         get_field(VllmConfig, "compilation_config")
+     worker_cls: str = ParallelConfig.worker_cls
+     worker_extension_cls: str = ParallelConfig.worker_extension_cls
+
+     kv_transfer_config: Optional[KVTransferConfig] = None
+     kv_events_config: Optional[KVEventsConfig] = None
+
+     generation_config: str = ModelConfig.generation_config
+     enable_sleep_mode: bool = ModelConfig.enable_sleep_mode
+     override_generation_config: dict[str, Any] = \
+         get_field(ModelConfig, "override_generation_config")
+     model_impl: str = ModelConfig.model_impl
+     override_attention_dtype: str = ModelConfig.override_attention_dtype
+
+     calculate_kv_scales: bool = CacheConfig.calculate_kv_scales
+     mamba_cache_dtype: MambaDType = CacheConfig.mamba_cache_dtype
+     mamba_ssm_cache_dtype: MambaDType = CacheConfig.mamba_ssm_cache_dtype
+
+     additional_config: dict[str, Any] = \
+         get_field(VllmConfig, "additional_config")
+
+     use_tqdm_on_load: bool = LoadConfig.use_tqdm_on_load
+     pt_load_map_location: str = LoadConfig.pt_load_map_location
+
+     # DEPRECATED
+     enable_multimodal_encoder_data_parallel: bool = False
+
+     logits_processors: Optional[list[Union[
+         str, type[LogitsProcessor]]]] = ModelConfig.logits_processors
+     """Custom logits processor types"""
+
+     async_scheduling: bool = SchedulerConfig.async_scheduling
+
+     kv_sharing_fast_prefill: bool = \
+         CacheConfig.kv_sharing_fast_prefill
+
+     def __post_init__(self):
+         # support `EngineArgs(compilation_config={...})`
+         # without having to manually construct a
+         # CompilationConfig object
+         if isinstance(self.compilation_config, dict):
+             self.compilation_config = CompilationConfig(
+                 **self.compilation_config)
+         if isinstance(self.eplb_config, dict):
+             self.eplb_config = EPLBConfig(**self.eplb_config)
+         # Setup plugins
+         from vllm.plugins import load_general_plugins
+         load_general_plugins()
+         # When HF is offline, replace the model ID with a local model path
+         if huggingface_hub.constants.HF_HUB_OFFLINE:
+             model_id = self.model
+             self.model = get_model_path(self.model, self.revision)
+             logger.info(
+                 "HF_HUB_OFFLINE is True, replacing model_id [%s] "
+                 "with model_path [%s]", model_id, self.model)
+
+     @staticmethod
+     def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
+         """Shared CLI arguments for vLLM engine."""
+
+         # Model arguments
+         model_kwargs = get_kwargs(ModelConfig)
+         model_group = parser.add_argument_group(
+             title="ModelConfig",
+             description=ModelConfig.__doc__,
+         )
+         if not ('serve' in sys.argv[1:] and '--help' in sys.argv[1:]):
+             model_group.add_argument("--model", **model_kwargs["model"])
+         model_group.add_argument("--runner", **model_kwargs["runner"])
+         model_group.add_argument("--convert", **model_kwargs["convert"])
+         model_group.add_argument("--task",
+                                  **model_kwargs["task"],
+                                  deprecated=True)
+         model_group.add_argument("--tokenizer", **model_kwargs["tokenizer"])
+         model_group.add_argument("--tokenizer-mode",
+                                  **model_kwargs["tokenizer_mode"])
+         model_group.add_argument("--trust-remote-code",
+                                  **model_kwargs["trust_remote_code"])
+         model_group.add_argument("--dtype", **model_kwargs["dtype"])
+         model_group.add_argument("--seed", **model_kwargs["seed"])
+         model_group.add_argument("--hf-config-path",
+                                  **model_kwargs["hf_config_path"])
+         model_group.add_argument("--allowed-local-media-path",
+                                  **model_kwargs["allowed_local_media_path"])
+         model_group.add_argument("--allowed-media-domains",
+                                  **model_kwargs["allowed_media_domains"])
+         model_group.add_argument("--revision", **model_kwargs["revision"])
+         model_group.add_argument("--code-revision",
+                                  **model_kwargs["code_revision"])
+         model_group.add_argument("--rope-scaling",
+                                  **model_kwargs["rope_scaling"])
+         model_group.add_argument("--rope-theta", **model_kwargs["rope_theta"])
+         model_group.add_argument("--tokenizer-revision",
+                                  **model_kwargs["tokenizer_revision"])
+         model_group.add_argument("--max-model-len",
+                                  **model_kwargs["max_model_len"])
+         model_group.add_argument("--quantization", "-q",
+                                  **model_kwargs["quantization"])
+         model_group.add_argument("--enforce-eager",
+                                  **model_kwargs["enforce_eager"])
+         model_group.add_argument("--max-logprobs",
+                                  **model_kwargs["max_logprobs"])
+         model_group.add_argument("--logprobs-mode",
+                                  **model_kwargs["logprobs_mode"])
+         model_group.add_argument("--disable-sliding-window",
+                                  **model_kwargs["disable_sliding_window"])
+         model_group.add_argument("--disable-cascade-attn",
+                                  **model_kwargs["disable_cascade_attn"])
+         model_group.add_argument("--skip-tokenizer-init",
+                                  **model_kwargs["skip_tokenizer_init"])
+         model_group.add_argument("--enable-prompt-embeds",
+                                  **model_kwargs["enable_prompt_embeds"])
+         model_group.add_argument("--served-model-name",
+                                  **model_kwargs["served_model_name"])
+         model_group.add_argument("--config-format",
+                                  **model_kwargs["config_format"])
+         # This one is a special case because it can be a bool
+         # or a str. TODO: Handle this in get_kwargs
+         model_group.add_argument("--hf-token",
+                                  type=str,
+                                  nargs="?",
+                                  const=True,
+                                  default=model_kwargs["hf_token"]["default"],
+                                  help=model_kwargs["hf_token"]["help"])
+         model_group.add_argument("--hf-overrides",
+                                  **model_kwargs["hf_overrides"])
+         model_group.add_argument("--pooler-config",
+                                  **model_kwargs["pooler_config"])
+         model_group.add_argument("--override-pooler-config",
+                                  **model_kwargs["override_pooler_config"],
+                                  deprecated=True)
+         model_group.add_argument("--logits-processor-pattern",
+                                  **model_kwargs["logits_processor_pattern"])
+         model_group.add_argument("--generation-config",
+                                  **model_kwargs["generation_config"])
+         model_group.add_argument("--override-generation-config",
+                                  **model_kwargs["override_generation_config"])
+         model_group.add_argument("--enable-sleep-mode",
+                                  **model_kwargs["enable_sleep_mode"])
+         model_group.add_argument("--model-impl", **model_kwargs["model_impl"])
+         model_group.add_argument("--override-attention-dtype",
+                                  **model_kwargs["override_attention_dtype"])
+         model_group.add_argument("--logits-processors",
+                                  **model_kwargs["logits_processors"])
+         model_group.add_argument("--io-processor-plugin",
+                                  **model_kwargs["io_processor_plugin"])
+
+         # Model loading arguments
+         load_kwargs = get_kwargs(LoadConfig)
+         load_group = parser.add_argument_group(
+             title="LoadConfig",
+             description=LoadConfig.__doc__,
+         )
+         load_group.add_argument("--load-format", **load_kwargs["load_format"])
+         load_group.add_argument("--download-dir",
+                                 **load_kwargs["download_dir"])
+         load_group.add_argument("--safetensors-load-strategy",
+                                 **load_kwargs["safetensors_load_strategy"])
+         load_group.add_argument("--model-loader-extra-config",
+                                 **load_kwargs["model_loader_extra_config"])
+         load_group.add_argument("--ignore-patterns",
+                                 **load_kwargs["ignore_patterns"])
+         load_group.add_argument("--use-tqdm-on-load",
+                                 **load_kwargs["use_tqdm_on_load"])
+         load_group.add_argument('--pt-load-map-location',
+                                 **load_kwargs["pt_load_map_location"])
+
+         # Structured outputs arguments
+         structured_outputs_kwargs = get_kwargs(StructuredOutputsConfig)
+         structured_outputs_group = parser.add_argument_group(
+             title="StructuredOutputsConfig",
+             description=StructuredOutputsConfig.__doc__,
+         )
+         structured_outputs_group.add_argument(
+             "--reasoning-parser",
+             # This choice is a special case because it's not static
+             choices=list(ReasoningParserManager.reasoning_parsers),
+             **structured_outputs_kwargs["reasoning_parser"])
+         # Deprecated guided decoding arguments
+         for arg, type in [
+             ("--guided-decoding-backend", str),
+             ("--guided-decoding-disable-fallback", bool),
+             ("--guided-decoding-disable-any-whitespace", bool),
+             ("--guided-decoding-disable-additional-properties", bool),
+         ]:
+             structured_outputs_group.add_argument(
+                 arg,
+                 type=type,
+                 help=(f"[DEPRECATED] {arg} will be removed in v0.12.0."),
+                 deprecated=True)
+
+         # Parallel arguments
+         parallel_kwargs = get_kwargs(ParallelConfig)
+         parallel_group = parser.add_argument_group(
+             title="ParallelConfig",
+             description=ParallelConfig.__doc__,
+         )
+         parallel_group.add_argument(
+             "--distributed-executor-backend",
+             **parallel_kwargs["distributed_executor_backend"])
+         parallel_group.add_argument(
+             "--pipeline-parallel-size", "-pp",
+             **parallel_kwargs["pipeline_parallel_size"])
+         parallel_group.add_argument("--tensor-parallel-size", "-tp",
+                                     **parallel_kwargs["tensor_parallel_size"])
+         parallel_group.add_argument(
+             "--decode-context-parallel-size", "-dcp",
+             **parallel_kwargs["decode_context_parallel_size"])
+         parallel_group.add_argument("--data-parallel-size", "-dp",
+                                     **parallel_kwargs["data_parallel_size"])
+         parallel_group.add_argument(
+             '--data-parallel-rank',
+             '-dpn',
+             type=int,
+             help='Data parallel rank of this instance. '
+             'When set, enables external load balancer mode.')
+         parallel_group.add_argument('--data-parallel-start-rank',
+                                     '-dpr',
+                                     type=int,
+                                     help='Starting data parallel rank '
+                                     'for secondary nodes.')
+         parallel_group.add_argument('--data-parallel-size-local',
+                                     '-dpl',
+                                     type=int,
+                                     help='Number of data parallel replicas '
+                                     'to run on this node.')
+         parallel_group.add_argument('--data-parallel-address',
+                                     '-dpa',
+                                     type=str,
+                                     help='Address of data parallel cluster '
+                                     'head-node.')
+         parallel_group.add_argument('--data-parallel-rpc-port',
+                                     '-dpp',
+                                     type=int,
+                                     help='Port for data parallel RPC '
+                                     'communication.')
+         parallel_group.add_argument('--data-parallel-backend',
+                                     '-dpb',
+                                     type=str,
+                                     default='mp',
+                                     help='Backend for data parallel, either '
+                                     '"mp" or "ray".')
+         parallel_group.add_argument(
+             "--data-parallel-hybrid-lb",
+             **parallel_kwargs["data_parallel_hybrid_lb"])
+         parallel_group.add_argument(
+             "--enable-expert-parallel",
+             **parallel_kwargs["enable_expert_parallel"])
+         parallel_group.add_argument("--enable-dbo",
+                                     **parallel_kwargs["enable_dbo"])
+         parallel_group.add_argument(
+             "--dbo-decode-token-threshold",
+             **parallel_kwargs["dbo_decode_token_threshold"])
+         parallel_group.add_argument(
+             "--dbo-prefill-token-threshold",
+             **parallel_kwargs["dbo_prefill_token_threshold"])
+         parallel_group.add_argument("--enable-eplb",
+                                     **parallel_kwargs["enable_eplb"])
+         parallel_group.add_argument("--eplb-config",
+                                     **parallel_kwargs["eplb_config"])
+         parallel_group.add_argument(
+             "--expert-placement-strategy",
+             **parallel_kwargs["expert_placement_strategy"])
+         parallel_group.add_argument(
+             "--num-redundant-experts",
+             type=int,
+             help=
+             "[DEPRECATED] --num-redundant-experts will be removed in v0.12.0.",
+             deprecated=True)
+         parallel_group.add_argument(
+             "--eplb-window-size",
+             type=int,
+             help="[DEPRECATED] --eplb-window-size will be removed in v0.12.0.",
+             deprecated=True)
+         parallel_group.add_argument(
+             "--eplb-step-interval",
+             type=int,
+             help=
+             "[DEPRECATED] --eplb-step-interval will be removed in v0.12.0.",
+             deprecated=True)
+         parallel_group.add_argument(
+             "--eplb-log-balancedness",
+             action=argparse.BooleanOptionalAction,
+             help=
+             "[DEPRECATED] --eplb-log-balancedness will be removed in v0.12.0.",
+             deprecated=True)
+
+         parallel_group.add_argument(
+             "--max-parallel-loading-workers",
+             **parallel_kwargs["max_parallel_loading_workers"])
+         parallel_group.add_argument(
+             "--ray-workers-use-nsight",
+             **parallel_kwargs["ray_workers_use_nsight"])
+         parallel_group.add_argument(
+             "--disable-custom-all-reduce",
+             **parallel_kwargs["disable_custom_all_reduce"])
+         parallel_group.add_argument("--worker-cls",
+                                     **parallel_kwargs["worker_cls"])
+         parallel_group.add_argument("--worker-extension-cls",
+                                     **parallel_kwargs["worker_extension_cls"])
+         parallel_group.add_argument(
+             "--enable-multimodal-encoder-data-parallel",
+             action="store_true",
+             deprecated=True)
+
+         # KV cache arguments
+         cache_kwargs = get_kwargs(CacheConfig)
+         cache_group = parser.add_argument_group(
+             title="CacheConfig",
+             description=CacheConfig.__doc__,
+         )
+         cache_group.add_argument("--block-size", **cache_kwargs["block_size"])
+         cache_group.add_argument("--gpu-memory-utilization",
+                                  **cache_kwargs["gpu_memory_utilization"])
+         cache_group.add_argument("--kv-cache-memory-bytes",
+                                  **cache_kwargs["kv_cache_memory_bytes"])
+         cache_group.add_argument("--swap-space", **cache_kwargs["swap_space"])
+         cache_group.add_argument("--kv-cache-dtype",
+                                  **cache_kwargs["cache_dtype"])
+         cache_group.add_argument("--num-gpu-blocks-override",
+                                  **cache_kwargs["num_gpu_blocks_override"])
+         cache_group.add_argument("--enable-prefix-caching",
+                                  **cache_kwargs["enable_prefix_caching"])
+         cache_group.add_argument("--prefix-caching-hash-algo",
+                                  **cache_kwargs["prefix_caching_hash_algo"])
+         cache_group.add_argument("--cpu-offload-gb",
+                                  **cache_kwargs["cpu_offload_gb"])
+         cache_group.add_argument("--calculate-kv-scales",
+                                  **cache_kwargs["calculate_kv_scales"])
+         cache_group.add_argument("--kv-sharing-fast-prefill",
+                                  **cache_kwargs["kv_sharing_fast_prefill"])
+         cache_group.add_argument("--mamba-cache-dtype",
+                                  **cache_kwargs["mamba_cache_dtype"])
+         cache_group.add_argument("--mamba-ssm-cache-dtype",
+                                  **cache_kwargs["mamba_ssm_cache_dtype"])
+
+         # Multimodal related configs
+         multimodal_kwargs = get_kwargs(MultiModalConfig)
+         multimodal_group = parser.add_argument_group(
+             title="MultiModalConfig",
+             description=MultiModalConfig.__doc__,
+         )
+         multimodal_group.add_argument("--limit-mm-per-prompt",
+                                       **multimodal_kwargs["limit_per_prompt"])
+         multimodal_group.add_argument("--media-io-kwargs",
+                                       **multimodal_kwargs["media_io_kwargs"])
+         multimodal_group.add_argument(
+             "--mm-processor-kwargs",
+             **multimodal_kwargs["mm_processor_kwargs"])
+         multimodal_group.add_argument(
+             "--mm-processor-cache-gb",
+             **multimodal_kwargs["mm_processor_cache_gb"])
+         multimodal_group.add_argument("--disable-mm-preprocessor-cache",
+                                       action="store_true",
+                                       deprecated=True)
+         multimodal_group.add_argument(
+             "--mm-processor-cache-type",
+             **multimodal_kwargs["mm_processor_cache_type"])
+         multimodal_group.add_argument(
+             "--mm-shm-cache-max-object-size-mb",
+             **multimodal_kwargs["mm_shm_cache_max_object_size_mb"])
+         multimodal_group.add_argument(
+             "--mm-encoder-tp-mode", **multimodal_kwargs["mm_encoder_tp_mode"])
+         multimodal_group.add_argument(
+             "--interleave-mm-strings",
+             **multimodal_kwargs["interleave_mm_strings"])
+         multimodal_group.add_argument("--skip-mm-profiling",
+                                       **multimodal_kwargs["skip_mm_profiling"])
+
+         multimodal_group.add_argument(
+             "--video-pruning-rate", **multimodal_kwargs["video_pruning_rate"])
+
+         # LoRA related configs
+         lora_kwargs = get_kwargs(LoRAConfig)
+         lora_group = parser.add_argument_group(
+             title="LoRAConfig",
+             description=LoRAConfig.__doc__,
+         )
+         lora_group.add_argument(
+             "--enable-lora",
+             action=argparse.BooleanOptionalAction,
+             help="If True, enable handling of LoRA adapters.")
+         lora_group.add_argument("--enable-lora-bias",
+                                 **lora_kwargs["bias_enabled"])
+         lora_group.add_argument("--max-loras", **lora_kwargs["max_loras"])
+         lora_group.add_argument("--max-lora-rank",
+                                 **lora_kwargs["max_lora_rank"])
+         lora_group.add_argument("--lora-extra-vocab-size",
+                                 **lora_kwargs["lora_extra_vocab_size"])
+         lora_group.add_argument(
+             "--lora-dtype",
+             **lora_kwargs["lora_dtype"],
+         )
+         lora_group.add_argument("--max-cpu-loras",
+                                 **lora_kwargs["max_cpu_loras"])
+         lora_group.add_argument("--fully-sharded-loras",
+                                 **lora_kwargs["fully_sharded_loras"])
+         lora_group.add_argument("--default-mm-loras",
+                                 **lora_kwargs["default_mm_loras"])
+
+         # Observability arguments
+         observability_kwargs = get_kwargs(ObservabilityConfig)
+         observability_group = parser.add_argument_group(
+             title="ObservabilityConfig",
+             description=ObservabilityConfig.__doc__,
+         )
+         observability_group.add_argument(
+             "--show-hidden-metrics-for-version",
+             **observability_kwargs["show_hidden_metrics_for_version"])
+         observability_group.add_argument(
+             "--otlp-traces-endpoint",
+             **observability_kwargs["otlp_traces_endpoint"])
+         # TODO: generalise this special case
+         choices = observability_kwargs["collect_detailed_traces"]["choices"]
+         metavar = f"{{{','.join(choices)}}}"
+         observability_kwargs["collect_detailed_traces"]["metavar"] = metavar
+         observability_kwargs["collect_detailed_traces"]["choices"] += [
+             ",".join(p)
+             for p in permutations(get_args(DetailedTraceModules), r=2)
+         ]
+         observability_group.add_argument(
+             "--collect-detailed-traces",
+             **observability_kwargs["collect_detailed_traces"])
+
+         # Scheduler arguments
+         scheduler_kwargs = get_kwargs(SchedulerConfig)
+         scheduler_group = parser.add_argument_group(
+             title="SchedulerConfig",
+             description=SchedulerConfig.__doc__,
+         )
+         scheduler_group.add_argument(
+             "--max-num-batched-tokens",
+             **scheduler_kwargs["max_num_batched_tokens"])
+         scheduler_group.add_argument("--max-num-seqs",
+                                      **scheduler_kwargs["max_num_seqs"])
+         scheduler_group.add_argument(
+             "--max-num-partial-prefills",
+             **scheduler_kwargs["max_num_partial_prefills"])
+         scheduler_group.add_argument(
+             "--max-long-partial-prefills",
+             **scheduler_kwargs["max_long_partial_prefills"])
+         scheduler_group.add_argument('--cuda-graph-sizes',
+                                      **scheduler_kwargs["cuda_graph_sizes"])
+         scheduler_group.add_argument(
+             "--long-prefill-token-threshold",
+             **scheduler_kwargs["long_prefill_token_threshold"])
+         scheduler_group.add_argument("--num-lookahead-slots",
+                                      **scheduler_kwargs["num_lookahead_slots"])
+         # multi-step scheduling has been removed; corresponding arguments
+         # are no longer supported.
+         scheduler_group.add_argument("--scheduling-policy",
+                                      **scheduler_kwargs["policy"])
+         scheduler_group.add_argument(
+             "--enable-chunked-prefill",
+             **scheduler_kwargs["enable_chunked_prefill"])
+         scheduler_group.add_argument(
+             "--disable-chunked-mm-input",
+             **scheduler_kwargs["disable_chunked_mm_input"])
+         scheduler_group.add_argument("--scheduler-cls",
+                                      **scheduler_kwargs["scheduler_cls"])
+         scheduler_group.add_argument(
+             "--disable-hybrid-kv-cache-manager",
+             **scheduler_kwargs["disable_hybrid_kv_cache_manager"])
+         scheduler_group.add_argument("--async-scheduling",
+                                      **scheduler_kwargs["async_scheduling"])
+
+         # vLLM arguments
+         vllm_kwargs = get_kwargs(VllmConfig)
+         vllm_group = parser.add_argument_group(
+             title="VllmConfig",
+             description=VllmConfig.__doc__,
+         )
+         # We construct SpeculativeConfig using fields from other configs in
+         # create_engine_config. So we set the type to a JSON string here to
+         # delay the Pydantic validation that comes with SpeculativeConfig.
+         vllm_kwargs["speculative_config"]["type"] = optional_type(json.loads)
+         vllm_group.add_argument("--speculative-config",
+                                 **vllm_kwargs["speculative_config"])
+         vllm_group.add_argument("--kv-transfer-config",
+                                 **vllm_kwargs["kv_transfer_config"])
+         vllm_group.add_argument('--kv-events-config',
+                                 **vllm_kwargs["kv_events_config"])
+         vllm_group.add_argument("--compilation-config", "-O",
+                                 **vllm_kwargs["compilation_config"])
+         vllm_group.add_argument("--additional-config",
+                                 **vllm_kwargs["additional_config"])
+         vllm_group.add_argument('--structured-outputs-config',
+                                 **vllm_kwargs["structured_outputs_config"])
+
+         # Other arguments
+         parser.add_argument('--disable-log-stats',
+                             action='store_true',
+                             help='Disable logging statistics.')
+
+         return parser
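# Illustrative parser setup (hypothetical invocation):
#
#     parser = FlexibleArgumentParser()
#     parser = EngineArgs.add_cli_args(parser)
#     args = parser.parse_args(["--model", "facebook/opt-125m", "-tp", "2"])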
+
+     @classmethod
+     def from_cli_args(cls, args: argparse.Namespace):
+         # Get the list of attributes of this dataclass.
+         attrs = [attr.name for attr in dataclasses.fields(cls)]
+         # Set the attributes from the parsed arguments.
+         engine_args = cls(**{
+             attr: getattr(args, attr)
+             for attr in attrs if hasattr(args, attr)
+         })
+         return engine_args
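# Illustrative round trip (continuing the hypothetical parser above):
#
#     engine_args = EngineArgs.from_cli_args(args)
#     # engine_args.tensor_parallel_size == 2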
+
+     def create_model_config(self) -> ModelConfig:
+         # A GGUF file needs a specific model loader and doesn't use hf_repo
+         if check_gguf_file(self.model):
+             self.quantization = self.load_format = "gguf"
+
+         # NOTE: This is to allow model loading from S3 in CI
+         if (not isinstance(self, AsyncEngineArgs) and envs.VLLM_CI_USE_S3
+                 and self.model in MODELS_ON_S3 and self.load_format == "auto"):
+             self.model = f"{MODEL_WEIGHTS_S3_BUCKET}/{self.model}"
+
+         if self.disable_mm_preprocessor_cache:
+             logger.warning(
+                 "`--disable-mm-preprocessor-cache` is deprecated "
+                 "and will be removed in v0.13. "
+                 "Please use `--mm-processor-cache-gb 0` instead.")
+
+             self.mm_processor_cache_gb = 0
+         elif envs.VLLM_MM_INPUT_CACHE_GIB != 4:
+             logger.warning(
+                 "`VLLM_MM_INPUT_CACHE_GIB` is deprecated "
+                 "and will be removed in v0.13. "
+                 "Please use `--mm-processor-cache-gb %d` instead.",
+                 envs.VLLM_MM_INPUT_CACHE_GIB,
+             )
+
+             self.mm_processor_cache_gb = envs.VLLM_MM_INPUT_CACHE_GIB
+
+         if self.enable_multimodal_encoder_data_parallel:
+             logger.warning(
+                 "`--enable-multimodal-encoder-data-parallel` is deprecated "
+                 "and will be removed in v0.13. "
+                 "Please use `--mm-encoder-tp-mode data` instead.")
+
+             self.mm_encoder_tp_mode = "data"
+
+         return ModelConfig(
+             model=self.model,
+             hf_config_path=self.hf_config_path,
+             runner=self.runner,
+             convert=self.convert,
+             task=self.task,
+             tokenizer=self.tokenizer,
+             tokenizer_mode=self.tokenizer_mode,
+             trust_remote_code=self.trust_remote_code,
+             allowed_local_media_path=self.allowed_local_media_path,
+             allowed_media_domains=self.allowed_media_domains,
+             dtype=self.dtype,
+             seed=self.seed,
+             revision=self.revision,
+             code_revision=self.code_revision,
+             rope_scaling=self.rope_scaling,
+             rope_theta=self.rope_theta,
+             hf_token=self.hf_token,
+             hf_overrides=self.hf_overrides,
+             tokenizer_revision=self.tokenizer_revision,
+             max_model_len=self.max_model_len,
+             quantization=self.quantization,
+             enforce_eager=self.enforce_eager,
+             max_logprobs=self.max_logprobs,
+             logprobs_mode=self.logprobs_mode,
+             disable_sliding_window=self.disable_sliding_window,
+             disable_cascade_attn=self.disable_cascade_attn,
+             skip_tokenizer_init=self.skip_tokenizer_init,
+             enable_prompt_embeds=self.enable_prompt_embeds,
+             served_model_name=self.served_model_name,
+             limit_mm_per_prompt=self.limit_mm_per_prompt,
+             interleave_mm_strings=self.interleave_mm_strings,
+             media_io_kwargs=self.media_io_kwargs,
+             skip_mm_profiling=self.skip_mm_profiling,
+             config_format=self.config_format,
+             mm_processor_kwargs=self.mm_processor_kwargs,
+             mm_processor_cache_gb=self.mm_processor_cache_gb,
+             mm_processor_cache_type=self.mm_processor_cache_type,
+             mm_shm_cache_max_object_size_mb=self.
+             mm_shm_cache_max_object_size_mb,
+             mm_encoder_tp_mode=self.mm_encoder_tp_mode,
+             pooler_config=self.pooler_config,
+             override_pooler_config=self.override_pooler_config,
+             logits_processor_pattern=self.logits_processor_pattern,
+             generation_config=self.generation_config,
+             override_generation_config=self.override_generation_config,
+             enable_sleep_mode=self.enable_sleep_mode,
+             model_impl=self.model_impl,
+             override_attention_dtype=self.override_attention_dtype,
+             logits_processors=self.logits_processors,
+             video_pruning_rate=self.video_pruning_rate,
+             io_processor_plugin=self.io_processor_plugin,
+         )
+
+     def validate_tensorizer_args(self):
+         from vllm.model_executor.model_loader.tensorizer import (
+             TensorizerConfig)
+         for key in self.model_loader_extra_config:
+             if key in TensorizerConfig._fields:
+                 self.model_loader_extra_config["tensorizer_config"][
+                     key] = self.model_loader_extra_config[key]
+
+     def create_load_config(self) -> LoadConfig:
+
+         if self.quantization == "bitsandbytes":
+             self.load_format = "bitsandbytes"
+
+         if self.load_format == "tensorizer":
+             if hasattr(self.model_loader_extra_config, "to_serializable"):
+                 self.model_loader_extra_config = (
+                     self.model_loader_extra_config.to_serializable())
+             self.model_loader_extra_config["tensorizer_config"] = {}
+             self.model_loader_extra_config["tensorizer_config"][
+                 "tensorizer_dir"] = self.model
+             self.validate_tensorizer_args()
+
+         return LoadConfig(
+             load_format=self.load_format,
+             download_dir=self.download_dir,
+             safetensors_load_strategy=self.safetensors_load_strategy,
+             device="cpu"
+             if is_online_quantization(self.quantization) else None,
+             model_loader_extra_config=self.model_loader_extra_config,
+             ignore_patterns=self.ignore_patterns,
+             use_tqdm_on_load=self.use_tqdm_on_load,
+             pt_load_map_location=self.pt_load_map_location,
+         )
+
+     def create_speculative_config(
+         self,
+         target_model_config: ModelConfig,
+         target_parallel_config: ParallelConfig,
+         enable_chunked_prefill: bool,
+         disable_log_stats: bool,
+     ) -> Optional["SpeculativeConfig"]:
+         """Initializes and returns a SpeculativeConfig object based on
+         `speculative_config`.
+
+         This function utilizes `speculative_config` to create a
+         SpeculativeConfig object. The `speculative_config` can either be
+         provided as a JSON string input via CLI arguments or directly as a
+         dictionary from the engine.
+         """
+         if self.speculative_config is None:
+             return None
+
+         # Note(Shangming): These parameters are not obtained from the CLI arg
+         # '--speculative-config' and must be passed in when creating the
+         # engine config.
+         self.speculative_config.update({
+             "target_model_config": target_model_config,
+             "target_parallel_config": target_parallel_config,
+             "enable_chunked_prefill": enable_chunked_prefill,
+             "disable_log_stats": disable_log_stats,
+         })
+         return SpeculativeConfig(**self.speculative_config)
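# Illustrative input (hypothetical draft settings): on the CLI this arrives as
# a JSON string and is merged with the target configs above, e.g.
#
#     --speculative-config '{"method": "ngram", "num_speculative_tokens": 4}'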
1110
+
1111
+ def create_engine_config(
1112
+ self,
1113
+ usage_context: Optional[UsageContext] = None,
1114
+ headless: bool = False,
1115
+ ) -> VllmConfig:
1116
+ """
1117
+ Create the VllmConfig.
1118
+
1119
+ NOTE: for autoselection of V0 vs V1 engine, we need to
1120
+ create the ModelConfig first, since ModelConfig's attrs
1121
+ (e.g. the model arch) are needed to make the decision.
1122
+
1123
+ This function set VLLM_USE_V1=X if VLLM_USE_V1 is
1124
+ unspecified by the user.
1125
+
1126
+ If VLLM_USE_V1 is specified by the user but the VllmConfig
1127
+ is incompatible, we raise an error.
1128
+ """
1129
+ current_platform.pre_register_and_update()
1130
+
1131
+ device_config = DeviceConfig(
1132
+ device=cast(Device, current_platform.device_type))
1133
+
1134
+ (self.model, self.tokenizer,
1135
+ self.speculative_config) = maybe_override_with_speculators(
1136
+ model=self.model,
1137
+ tokenizer=self.tokenizer,
1138
+ revision=self.revision,
1139
+ trust_remote_code=self.trust_remote_code,
1140
+ vllm_speculative_config=self.speculative_config,
1141
+ )
1142
+ model_config = self.create_model_config()
1143
+
1144
+ # * If VLLM_USE_V1 is unset, we enable V1 for "supported features"
1145
+ # and fall back to V0 for experimental or unsupported features.
1146
+ # * If VLLM_USE_V1=1, we enable V1 for supported + experimental
1147
+ # features and raise error for unsupported features.
1148
+ # * If VLLM_USE_V1=0, we disable V1.
1149
+ use_v1 = False
1150
+ try_v1 = envs.VLLM_USE_V1 or not envs.is_set("VLLM_USE_V1")
1151
+ if try_v1 and self._is_v1_supported_oracle(model_config):
1152
+ use_v1 = True
1153
+
1154
+ # If user explicitly set VLLM_USE_V1, sanity check we respect it.
1155
+ if envs.is_set("VLLM_USE_V1"):
1156
+ assert use_v1 == envs.VLLM_USE_V1
1157
+ # Otherwise, set the VLLM_USE_V1 variable globally.
1158
+ else:
1159
+ envs.set_vllm_use_v1(use_v1)
1160
+
1161
+ # Set default arguments for V1 Engine.
1162
+ self._set_default_args(usage_context, model_config)
1163
+ # Disable chunked prefill for POWER (ppc64le)/ARM/s390x CPUs in V1
1164
+ if current_platform.is_cpu() and current_platform.get_cpu_architecture(
1165
+ ) in (CpuArchEnum.POWERPC, CpuArchEnum.S390X, CpuArchEnum.ARM):
1166
+ logger.info("Chunked prefill is not supported for ARM and POWER "
1167
+ "and S390X CPUs; "
1168
+ "disabling it for V1 backend.")
1169
+ self.enable_chunked_prefill = False
1170
+ assert self.enable_chunked_prefill is not None
1171
+
1172
+ sliding_window: Optional[int] = None
1173
+ if not is_interleaved(model_config.hf_text_config):
1174
+ # Only set CacheConfig.sliding_window if the model is all sliding
1175
+ # window. Otherwise CacheConfig.sliding_window will override the
1176
+ # global layers in interleaved sliding window models.
1177
+ sliding_window = model_config.get_sliding_window()
1178
+
1179
+ # Note(hc): In the current implementation of decode context
1180
+ # parallel(DCP), tp_size needs to be divisible by dcp_size,
1181
+ # because the world size does not change by dcp, it simply
1182
+ # reuses the GPUs of TP group, and split one TP group into
1183
+ # tp_size//dcp_size DCP groups.
1184
+ assert self.tensor_parallel_size % self.decode_context_parallel_size \
1185
+ == 0, (
1186
+ f"tp_size={self.tensor_parallel_size} must be divisible by"
1187
+ f"dcp_size={self.decode_context_parallel_size}."
1188
+ )
1189
+
1190
+ cache_config = CacheConfig(
1191
+ block_size=self.block_size,
1192
+ gpu_memory_utilization=self.gpu_memory_utilization,
1193
+ kv_cache_memory_bytes=self.kv_cache_memory_bytes,
1194
+ swap_space=self.swap_space,
1195
+ cache_dtype=self.kv_cache_dtype,
1196
+ is_attention_free=model_config.is_attention_free,
1197
+ num_gpu_blocks_override=self.num_gpu_blocks_override,
1198
+ sliding_window=sliding_window,
1199
+ enable_prefix_caching=self.enable_prefix_caching,
1200
+ prefix_caching_hash_algo=self.prefix_caching_hash_algo,
1201
+ cpu_offload_gb=self.cpu_offload_gb,
1202
+ calculate_kv_scales=self.calculate_kv_scales,
1203
+ kv_sharing_fast_prefill=self.kv_sharing_fast_prefill,
1204
+ mamba_cache_dtype=self.mamba_cache_dtype,
1205
+ mamba_ssm_cache_dtype=self.mamba_ssm_cache_dtype,
1206
+ )
1207
+
1208
+ ray_runtime_env = None
1209
+ if is_ray_initialized():
1210
+ # Ray Serve LLM calls `create_engine_config` in the context
1211
+ # of a Ray task, therefore we check is_ray_initialized()
1212
+ # as opposed to is_in_ray_actor().
1213
+ import ray
1214
+ ray_runtime_env = ray.get_runtime_context().runtime_env
1215
+ logger.info("Using ray runtime env: %s", ray_runtime_env)
1216
+
1217
+ # Get the current placement group if Ray is initialized and
1218
+ # we are in a Ray actor. If so, then the placement group will be
1219
+ # passed to spawned processes.
1220
+ placement_group = None
1221
+ if is_in_ray_actor():
1222
+ import ray
1223
+
1224
+ # This call initializes Ray automatically if it is not initialized,
1225
+ # but we should not do this here.
1226
+ placement_group = ray.util.get_current_placement_group()
1227
+
1228
+ assert not headless or not self.data_parallel_hybrid_lb, (
1229
+ "data_parallel_hybrid_lb is not applicable in "
1230
+ "headless mode")
1231
+
1232
+ data_parallel_external_lb = self.data_parallel_rank is not None
1233
+ # Local DP rank = 1, use pure-external LB.
1234
+ if data_parallel_external_lb:
1235
+ assert self.data_parallel_size_local in (1, None), (
1236
+ "data_parallel_size_local must be 1 when data_parallel_rank "
1237
+ "is set")
1238
+ data_parallel_size_local = 1
1239
+ # Use full external lb if we have local_size of 1.
1240
+ self.data_parallel_hybrid_lb = False
1241
+ elif self.data_parallel_size_local is not None:
1242
+ data_parallel_size_local = self.data_parallel_size_local
1243
+
1244
+ if self.data_parallel_start_rank and not headless:
1245
+ # Infer hybrid LB mode.
1246
+ self.data_parallel_hybrid_lb = True
1247
+
1248
+ if self.data_parallel_hybrid_lb and data_parallel_size_local == 1:
1249
+ # Use full external lb if we have local_size of 1.
1250
+ data_parallel_external_lb = True
1251
+ self.data_parallel_hybrid_lb = False
1252
+
1253
+ if data_parallel_size_local == self.data_parallel_size:
1254
+ # Disable hybrid LB mode if set for a single node
1255
+ self.data_parallel_hybrid_lb = False
1256
+
1257
+ self.data_parallel_rank = self.data_parallel_start_rank or 0
1258
+ else:
1259
+ assert not self.data_parallel_hybrid_lb, (
1260
+ "data_parallel_size_local must be set to use "
1261
+ "data_parallel_hybrid_lb.")
1262
+
1263
+ # Local DP size defaults to global DP size if not set.
1264
+ data_parallel_size_local = self.data_parallel_size
+
+ # DP address, used in multi-node case for torch distributed group
+ # and ZMQ sockets.
+ if self.data_parallel_address is None:
+ if self.data_parallel_backend == "ray":
+ host_ip = get_ip()
+ logger.info(
+ "Using host IP %s as ray-based data parallel address",
+ host_ip)
+ data_parallel_address = host_ip
+ else:
+ assert self.data_parallel_backend == "mp", (
+ "data_parallel_backend can only be ray or mp, got "
+ f"{self.data_parallel_backend}")
+ data_parallel_address = ParallelConfig.data_parallel_master_ip
+ else:
+ data_parallel_address = self.data_parallel_address
+
+ # This port is only used when there are remote data parallel engines,
+ # otherwise the local IPC transport is used.
+ data_parallel_rpc_port = (self.data_parallel_rpc_port
+ if self.data_parallel_rpc_port is not None
+ else ParallelConfig.data_parallel_rpc_port)
+
+ if self.async_scheduling:
+ # Async scheduling does not work with the uniprocess backend.
+ if self.distributed_executor_backend is None:
+ self.distributed_executor_backend = "mp"
+ logger.info("Defaulting to mp-based distributed executor "
+ "backend for async scheduling.")
+ if self.pipeline_parallel_size > 1:
+ raise ValueError("Async scheduling is not supported with "
+ "pipeline-parallel-size > 1.")
+
+ # Currently, async scheduling does not support speculative decoding.
+ # TODO(woosuk): Support it.
+ if self.speculative_config is not None:
+ raise ValueError(
+ "Currently, speculative decoding is not supported with "
+ "async scheduling.")
+
+ # Forward the deprecated CLI args to the EPLB config.
+ if self.num_redundant_experts is not None:
+ self.eplb_config.num_redundant_experts = self.num_redundant_experts
+ if self.eplb_window_size is not None:
+ self.eplb_config.window_size = self.eplb_window_size
+ if self.eplb_step_interval is not None:
+ self.eplb_config.step_interval = self.eplb_step_interval
+ if self.eplb_log_balancedness is not None:
+ self.eplb_config.log_balancedness = self.eplb_log_balancedness
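+ # E.g. a legacy --num-redundant-experts=2 (hypothetical value) now
+ # lands in eplb_config.num_redundant_experts rather than a dedicated
+ # top-level field, so older command lines keep working.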
+
+ parallel_config = ParallelConfig(
+ pipeline_parallel_size=self.pipeline_parallel_size,
+ tensor_parallel_size=self.tensor_parallel_size,
+ data_parallel_size=self.data_parallel_size,
+ data_parallel_rank=self.data_parallel_rank or 0,
+ data_parallel_external_lb=data_parallel_external_lb,
+ data_parallel_size_local=data_parallel_size_local,
+ data_parallel_master_ip=data_parallel_address,
+ data_parallel_rpc_port=data_parallel_rpc_port,
+ data_parallel_backend=self.data_parallel_backend,
+ data_parallel_hybrid_lb=self.data_parallel_hybrid_lb,
+ enable_expert_parallel=self.enable_expert_parallel,
+ enable_dbo=self.enable_dbo,
+ dbo_decode_token_threshold=self.dbo_decode_token_threshold,
+ dbo_prefill_token_threshold=self.dbo_prefill_token_threshold,
+ enable_eplb=self.enable_eplb,
+ eplb_config=self.eplb_config,
+ expert_placement_strategy=self.expert_placement_strategy,
+ max_parallel_loading_workers=self.max_parallel_loading_workers,
+ disable_custom_all_reduce=self.disable_custom_all_reduce,
+ ray_workers_use_nsight=self.ray_workers_use_nsight,
+ ray_runtime_env=ray_runtime_env,
+ placement_group=placement_group,
+ distributed_executor_backend=self.distributed_executor_backend,
+ worker_cls=self.worker_cls,
+ worker_extension_cls=self.worker_extension_cls,
+ decode_context_parallel_size=self.decode_context_parallel_size,
+ _api_process_count=self._api_process_count,
+ _api_process_rank=self._api_process_rank,
+ )
+
+ speculative_config = self.create_speculative_config(
+ target_model_config=model_config,
+ target_parallel_config=parallel_config,
+ enable_chunked_prefill=self.enable_chunked_prefill,
+ disable_log_stats=self.disable_log_stats,
+ )
+
+ # Make sure num_lookahead_slots is set appropriately depending on
+ # whether speculative decoding is enabled.
+ num_lookahead_slots = self.num_lookahead_slots
+ if speculative_config is not None:
+ num_lookahead_slots = speculative_config.num_lookahead_slots
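+ # E.g. a draft method proposing up to k tokens per step (k assumed
+ # from the speculative config) needs k lookahead slots so the
+ # scheduler reserves KV-cache space for the as-yet-unverified tokens.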
+
+ scheduler_config = SchedulerConfig(
+ runner_type=model_config.runner_type,
+ max_num_batched_tokens=self.max_num_batched_tokens,
+ max_num_seqs=self.max_num_seqs,
+ max_model_len=model_config.max_model_len,
+ cuda_graph_sizes=self.cuda_graph_sizes,
+ num_lookahead_slots=num_lookahead_slots,
+ enable_chunked_prefill=self.enable_chunked_prefill,
+ disable_chunked_mm_input=self.disable_chunked_mm_input,
+ is_multimodal_model=model_config.is_multimodal_model,
+ send_delta_data=(envs.VLLM_USE_RAY_SPMD_WORKER
+ and parallel_config.use_ray),
+ policy=self.scheduling_policy,
+ scheduler_cls=self.scheduler_cls,
+ max_num_partial_prefills=self.max_num_partial_prefills,
+ max_long_partial_prefills=self.max_long_partial_prefills,
+ long_prefill_token_threshold=self.long_prefill_token_threshold,
+ disable_hybrid_kv_cache_manager=self.disable_hybrid_kv_cache_manager,
+ async_scheduling=self.async_scheduling,
+ )
+
+ if not model_config.is_multimodal_model and self.default_mm_loras:
+ raise ValueError(
+ "Default modality-specific LoRA(s) were provided for a "
+ "non-multimodal model")
+
+ lora_config = LoRAConfig(
+ bias_enabled=self.enable_lora_bias,
+ max_lora_rank=self.max_lora_rank,
+ max_loras=self.max_loras,
+ default_mm_loras=self.default_mm_loras,
+ fully_sharded_loras=self.fully_sharded_loras,
+ lora_extra_vocab_size=self.lora_extra_vocab_size,
+ lora_dtype=self.lora_dtype,
+ max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
+ and self.max_cpu_loras > 0 else None) if self.enable_lora else None
+
+ # bitsandbytes pre-quantized models need a specific model loader.
+ if model_config.quantization == "bitsandbytes":
+ self.quantization = self.load_format = "bitsandbytes"
+
+ load_config = self.create_load_config()
+
+ # Pass reasoning_parser into StructuredOutputsConfig.
+ if self.reasoning_parser:
+ self.structured_outputs_config.reasoning_parser = \
+ self.reasoning_parser
+
+ # Forward the deprecated CLI args to the StructuredOutputsConfig.
+ so_config = self.structured_outputs_config
+ if self.guided_decoding_backend is not None:
+ so_config.guided_decoding_backend = \
+ self.guided_decoding_backend
+ if self.guided_decoding_disable_fallback is not None:
+ so_config.guided_decoding_disable_fallback = \
+ self.guided_decoding_disable_fallback
+ if self.guided_decoding_disable_any_whitespace is not None:
+ so_config.guided_decoding_disable_any_whitespace = \
+ self.guided_decoding_disable_any_whitespace
+ if self.guided_decoding_disable_additional_properties is not None:
+ so_config.guided_decoding_disable_additional_properties = \
+ self.guided_decoding_disable_additional_properties
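+ # E.g. a legacy --guided-decoding-backend=xgrammar (example value) is
+ # forwarded to so_config.guided_decoding_backend, so older structured
+ # output invocations behave as before.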
+
+ observability_config = ObservabilityConfig(
+ show_hidden_metrics_for_version=(
+ self.show_hidden_metrics_for_version),
+ otlp_traces_endpoint=self.otlp_traces_endpoint,
+ collect_detailed_traces=self.collect_detailed_traces,
+ )
+
+ config = VllmConfig(
+ model_config=model_config,
+ cache_config=cache_config,
+ parallel_config=parallel_config,
+ scheduler_config=scheduler_config,
+ device_config=device_config,
+ lora_config=lora_config,
+ speculative_config=speculative_config,
+ load_config=load_config,
+ structured_outputs_config=self.structured_outputs_config,
+ observability_config=observability_config,
+ compilation_config=self.compilation_config,
+ kv_transfer_config=self.kv_transfer_config,
+ kv_events_config=self.kv_events_config,
+ additional_config=self.additional_config,
+ )
+
+ return config
+
+ def _is_v1_supported_oracle(self, model_config: ModelConfig) -> bool:
+ """Oracle for whether to use V0 or V1 Engine by default."""
+
+ #############################################################
+ # Unsupported Feature Flags on V1.
+
+ if (self.logits_processor_pattern
+ != EngineArgs.logits_processor_pattern):
+ _raise_or_fallback(feature_name="--logits-processor-pattern",
+ recommend_to_remove=False)
+ return False
+
+ # No Mamba or Encoder-Decoder so far.
+ if not model_config.is_v1_compatible:
+ _raise_or_fallback(feature_name=model_config.architectures,
+ recommend_to_remove=False)
+ return False
+
+ # No Concurrent Partial Prefills so far.
+ if (self.max_num_partial_prefills
+ != SchedulerConfig.max_num_partial_prefills
+ or self.max_long_partial_prefills
+ != SchedulerConfig.max_long_partial_prefills):
+ _raise_or_fallback(feature_name="Concurrent Partial Prefill",
+ recommend_to_remove=False)
+ return False
+
+ # V1 supports N-gram, Medusa, and Eagle speculative decoding.
+ if self.speculative_config is not None:
+ # speculative_config could still be a dict at this point
+ if isinstance(self.speculative_config, dict):
+ method = self.speculative_config.get("method", None)
+ else:
+ method = self.speculative_config.method
+
+ if method == "draft_model":
+ raise NotImplementedError(
+ "Draft model speculative decoding is not supported yet. "
+ "Please consider using other speculative decoding methods "
+ "such as ngram, medusa, eagle, or mtp.")
+
+ V1_BACKENDS = [
+ "FLASH_ATTN",
+ "PALLAS",
+ "TRITON_ATTN",
+ "TRITON_MLA",
+ "CUTLASS_MLA",
+ "FLASHMLA",
+ "FLASH_ATTN_MLA",
+ "FLASHINFER",
+ "FLASHINFER_MLA",
+ "ROCM_AITER_MLA",
+ "TORCH_SDPA",
+ "FLEX_ATTENTION",
+ "TREE_ATTN",
+ "XFORMERS",
+ "ROCM_ATTN",
+ ]
+ if (envs.is_set("VLLM_ATTENTION_BACKEND")
+ and envs.VLLM_ATTENTION_BACKEND not in V1_BACKENDS):
+ name = f"VLLM_ATTENTION_BACKEND={envs.VLLM_ATTENTION_BACKEND}"
+ _raise_or_fallback(feature_name=name, recommend_to_remove=True)
+ return False
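+ # Illustrative: VLLM_ATTENTION_BACKEND=FLASHINFER passes this check,
+ # while an unlisted backend name falls back to V0 (or raises when
+ # VLLM_USE_V1=1 was set explicitly; see _raise_or_fallback below).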
+
+ #############################################################
+ # Experimental Features - allow users to opt in.
+
+ if self.pipeline_parallel_size > 1:
+ supports_pp = getattr(self.distributed_executor_backend,
+ 'supports_pp', False)
+ if not supports_pp and self.distributed_executor_backend not in (
+ ParallelConfig.distributed_executor_backend, "ray", "mp",
+ "external_launcher"):
+ name = "Pipeline Parallelism without Ray distributed " \
+ "executor or multiprocessing executor or external " \
+ "launcher"
+ _raise_or_fallback(feature_name=name,
+ recommend_to_remove=False)
+ return False
+
+ if (current_platform.is_cpu()
+ and model_config.get_sliding_window() is not None):
+ _raise_or_fallback(feature_name="sliding window (CPU backend)",
+ recommend_to_remove=False)
+ return False
+
+ #############################################################
+
+ return True
+
+ def _set_default_args(self, usage_context: UsageContext,
+ model_config: ModelConfig) -> None:
+ """Set Default Arguments for V1 Engine."""
+
+ # V1 always uses chunked prefill and prefix caching for
+ # non-pooling tasks. For pooling tasks, the defaults depend on
+ # whether incremental prefill is supported (see below).
+ if model_config.runner_type != "pooling":
+ self.enable_chunked_prefill = True
+
+ # TODO: When prefix caching supports prompt embeds inputs, this
+ # check can be removed.
+ if (self.enable_prompt_embeds
+ and self.enable_prefix_caching is not False):
+ logger.warning(
+ "--enable-prompt-embeds and --enable-prefix-caching "
+ "are not supported together in V1. Prefix caching has "
+ "been disabled.")
+ self.enable_prefix_caching = False
+
+ if self.enable_prefix_caching is None:
+ self.enable_prefix_caching = True
+ else:
+ pooling_type = model_config.pooler_config.pooling_type
+ is_causal = getattr(model_config.hf_config, "is_causal", True)
+ incremental_prefill_supported = (pooling_type is not None
+ and pooling_type.lower() == "last"
+ and is_causal)
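+ # Example (assumed model config): a causal pooling model with
+ # pooling_type == "LAST" supports incremental prefill, so chunked
+ # prefill and prefix caching default on below; a "CLS" or "MEAN"
+ # pooler does not, so both default off.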
+
+ action = "Enabling" if \
+ incremental_prefill_supported else "Disabling"
+
+ if self.enable_chunked_prefill is None:
+ self.enable_chunked_prefill = incremental_prefill_supported
+ logger.info("(%s) chunked prefill by default", action)
+ if self.enable_prefix_caching is None:
+ self.enable_prefix_caching = incremental_prefill_supported
+ logger.info("(%s) prefix caching by default", action)
+
+ # V1 should use the new scheduler by default.
+ # Swap it only if this arg is set to the original V0 default.
+ if self.scheduler_cls == EngineArgs.scheduler_cls:
+ self.scheduler_cls = "vllm.v1.core.sched.scheduler.Scheduler"
+
+ # When no user override, set the default values based on the usage
+ # context.
+ # Use different default values for different hardware.
+
+ # Try to query the device name on the current platform. If it fails,
+ # it may be because the platform that imports vLLM is not the same
+ # as the platform that vLLM is running on (e.g. the case of scaling
+ # vLLM with Ray) and has no GPUs. In this case we use the default
+ # values for non-H100/H200 GPUs.
+ try:
+ device_memory = current_platform.get_device_total_memory()
+ device_name = current_platform.get_device_name().lower()
+ except Exception:
+ # Only used to pick default_max_num_batched_tokens below; fall
+ # back to values that cannot match the large-GPU branch.
+ device_memory = 0
+ device_name = ""
+
+ # NOTE(Kuntai): Setting large `max_num_batched_tokens` for A100 reduces
+ # throughput, see PR #17885 for more details.
+ # So here we do an extra device name check to prevent such regression.
+ from vllm.usage.usage_lib import UsageContext
+ if device_memory >= 70 * GiB_bytes and "a100" not in device_name:
+ # For GPUs like H100 and MI300x, use larger default values.
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 16384,
+ UsageContext.OPENAI_API_SERVER: 8192,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 1024,
+ UsageContext.OPENAI_API_SERVER: 1024,
+ }
+ else:
+ # TODO(woosuk): Tune the default values for other hardware.
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 8192,
+ UsageContext.OPENAI_API_SERVER: 2048,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 256,
+ UsageContext.OPENAI_API_SERVER: 256,
+ }
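+ # E.g. an 80 GiB H100 serving the OpenAI API server defaults to
+ # max_num_batched_tokens=8192, while an 80 GiB A100 is caught by the
+ # "a100" name check and keeps the smaller 2048 default.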
+
+ # tpu specific default values.
+ if current_platform.is_tpu():
+ default_max_num_batched_tokens_tpu = {
+ UsageContext.LLM_CLASS: {
+ 'V6E': 2048,
+ 'V5E': 1024,
+ 'V5P': 512,
+ },
+ UsageContext.OPENAI_API_SERVER: {
+ 'V6E': 1024,
+ 'V5E': 512,
+ 'V5P': 256,
+ }
+ }
+
+ # cpu specific default values.
+ if current_platform.is_cpu():
+ world_size = self.pipeline_parallel_size * self.tensor_parallel_size
+ default_max_num_batched_tokens = {
+ UsageContext.LLM_CLASS: 4096 * world_size,
+ UsageContext.OPENAI_API_SERVER: 2048 * world_size,
+ }
+ default_max_num_seqs = {
+ UsageContext.LLM_CLASS: 256 * world_size,
+ UsageContext.OPENAI_API_SERVER: 128 * world_size,
+ }
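+ # E.g. on CPU with --tensor-parallel-size=2 (hypothetical), world_size
+ # is 2, so the API-server defaults scale to 4096 batched tokens and
+ # 256 sequences.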
+
+ use_context_value = usage_context.value if usage_context else None
+ if (self.max_num_batched_tokens is None
+ and usage_context in default_max_num_batched_tokens):
+ if current_platform.is_tpu():
+ chip_name = current_platform.get_device_name()
+ if chip_name in default_max_num_batched_tokens_tpu[
+ usage_context]:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens_tpu[
+ usage_context][chip_name]
+ else:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens[usage_context]
+ else:
+ if not self.enable_chunked_prefill:
+ self.max_num_batched_tokens = model_config.max_model_len
+ else:
+ self.max_num_batched_tokens = \
+ default_max_num_batched_tokens[usage_context]
+ logger.debug(
+ "Setting max_num_batched_tokens to %d for %s usage context.",
+ self.max_num_batched_tokens, use_context_value)
+
+ if (self.max_num_seqs is None
+ and usage_context in default_max_num_seqs):
+ self.max_num_seqs = min(default_max_num_seqs[usage_context],
+ self.max_num_batched_tokens or sys.maxsize)
+
+ logger.debug("Setting max_num_seqs to %d for %s usage context.",
+ self.max_num_seqs, use_context_value)
+
+
+ @dataclass
+ class AsyncEngineArgs(EngineArgs):
+ """Arguments for asynchronous vLLM engine."""
+ enable_log_requests: bool = False
+
+ @property
+ @deprecated(
+ "`disable_log_requests` is deprecated and has been replaced with "
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
+ "`enable_log_requests` instead.")
+ def disable_log_requests(self) -> bool:
+ return not self.enable_log_requests
+
+ @disable_log_requests.setter
+ @deprecated(
+ "`disable_log_requests` is deprecated and has been replaced with "
+ "`enable_log_requests`. This will be removed in v0.12.0. Please use "
+ "`enable_log_requests` instead.")
+ def disable_log_requests(self, value: bool):
+ self.enable_log_requests = not value
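+ # E.g. code reading `args.disable_log_requests` keeps working (it
+ # simply inverts enable_log_requests) but is slated for removal in
+ # v0.12.0 per the deprecation message above.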
+
+ @staticmethod
+ def add_cli_args(parser: FlexibleArgumentParser,
+ async_args_only: bool = False) -> FlexibleArgumentParser:
+ # Initialize plugins to update the parser; for example, a plugin may
+ # add a new kind of quantization method to the --quantization argument
+ # or a new device to the --device argument.
+ load_general_plugins()
+ if not async_args_only:
+ parser = EngineArgs.add_cli_args(parser)
+ parser.add_argument('--enable-log-requests',
+ action=argparse.BooleanOptionalAction,
+ default=AsyncEngineArgs.enable_log_requests,
+ help='Enable logging requests.')
+ parser.add_argument('--disable-log-requests',
+ action=argparse.BooleanOptionalAction,
+ default=not AsyncEngineArgs.enable_log_requests,
+ help='[DEPRECATED] Disable logging requests.',
+ deprecated=True)
+ current_platform.pre_register_and_update(parser)
+ return parser
+
+
+ def _raise_or_fallback(feature_name: str, recommend_to_remove: bool):
+ if envs.is_set("VLLM_USE_V1") and envs.VLLM_USE_V1:
+ raise NotImplementedError(
+ f"VLLM_USE_V1=1 is not supported with {feature_name}.")
+ msg = f"{feature_name} is not supported by the V1 Engine. "
+ msg += "Falling back to V0. "
+ if recommend_to_remove:
+ msg += f"We recommend removing {feature_name} from your config "
+ msg += "in favor of the V1 Engine."
+ logger.warning(msg)
+
+
+ def human_readable_int(value):
+ """Parse human-readable integers like '1k', '2M', etc.
+ Decimal values are allowed with decimal multipliers (lowercase
+ suffixes), but not with binary multipliers (uppercase suffixes).
+
+ Examples:
+ - '1k' -> 1,000
+ - '1K' -> 1,024
+ - '25.6k' -> 25,600
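+ - '2M' -> 2,097,152
+ - '1g' -> 1,000,000,000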
+ """
+ value = value.strip()
+ match = re.fullmatch(r'(\d+(?:\.\d+)?)([kKmMgGtT])', value)
+ if match:
+ # The regex accepts t/T, so both need multipliers as well.
+ decimal_multiplier = {
+ 'k': 10**3,
+ 'm': 10**6,
+ 'g': 10**9,
+ 't': 10**12,
+ }
+ binary_multiplier = {
+ 'K': 2**10,
+ 'M': 2**20,
+ 'G': 2**30,
+ 'T': 2**40,
+ }
+
+ number, suffix = match.groups()
+ if suffix in decimal_multiplier:
+ mult = decimal_multiplier[suffix]
+ return int(float(number) * mult)
+ elif suffix in binary_multiplier:
+ mult = binary_multiplier[suffix]
+ # Do not allow decimals with binary multipliers.
+ try:
+ return int(number) * mult
+ except ValueError as e:
+ raise argparse.ArgumentTypeError(
+ "Decimals are not allowed "
+ f"with binary suffixes like {suffix}. Did you mean to use "
+ f"{number}{suffix.lower()} instead?") from e
+
+ # Regular plain number.
+ return int(value)