vllm-cpu-amxbf16 0.11.2.post2__cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1536)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +983 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +2863 -0
  6. vllm/_ipex_ops.py +457 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +59 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +18 -0
  14. vllm/attention/backends/__init__.py +0 -0
  15. vllm/attention/backends/abstract.py +391 -0
  16. vllm/attention/backends/registry.py +195 -0
  17. vllm/attention/backends/utils.py +33 -0
  18. vllm/attention/layer.py +1052 -0
  19. vllm/attention/layers/__init__.py +0 -0
  20. vllm/attention/layers/chunked_local_attention.py +121 -0
  21. vllm/attention/layers/cross_attention.py +178 -0
  22. vllm/attention/layers/encoder_only_attention.py +103 -0
  23. vllm/attention/ops/__init__.py +0 -0
  24. vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
  25. vllm/attention/ops/common.py +414 -0
  26. vllm/attention/ops/flashmla.py +251 -0
  27. vllm/attention/ops/merge_attn_states.py +47 -0
  28. vllm/attention/ops/paged_attn.py +262 -0
  29. vllm/attention/ops/pallas_kv_cache_update.py +130 -0
  30. vllm/attention/ops/prefix_prefill.py +814 -0
  31. vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
  32. vllm/attention/ops/triton_decode_attention.py +712 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +105 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
  35. vllm/attention/ops/triton_unified_attention.py +941 -0
  36. vllm/attention/ops/vit_attn_wrappers.py +178 -0
  37. vllm/attention/selector.py +231 -0
  38. vllm/attention/utils/__init__.py +0 -0
  39. vllm/attention/utils/fa_utils.py +109 -0
  40. vllm/attention/utils/kv_sharing_utils.py +33 -0
  41. vllm/attention/utils/kv_transfer_utils.py +60 -0
  42. vllm/beam_search.py +88 -0
  43. vllm/benchmarks/__init__.py +0 -0
  44. vllm/benchmarks/datasets.py +3222 -0
  45. vllm/benchmarks/latency.py +172 -0
  46. vllm/benchmarks/lib/__init__.py +3 -0
  47. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  48. vllm/benchmarks/lib/ready_checker.py +72 -0
  49. vllm/benchmarks/lib/utils.py +79 -0
  50. vllm/benchmarks/serve.py +1531 -0
  51. vllm/benchmarks/sweep/__init__.py +0 -0
  52. vllm/benchmarks/sweep/cli.py +38 -0
  53. vllm/benchmarks/sweep/param_sweep.py +91 -0
  54. vllm/benchmarks/sweep/plot.py +580 -0
  55. vllm/benchmarks/sweep/serve.py +416 -0
  56. vllm/benchmarks/sweep/serve_sla.py +492 -0
  57. vllm/benchmarks/sweep/server.py +114 -0
  58. vllm/benchmarks/sweep/sla_sweep.py +132 -0
  59. vllm/benchmarks/sweep/utils.py +4 -0
  60. vllm/benchmarks/throughput.py +799 -0
  61. vllm/collect_env.py +857 -0
  62. vllm/compilation/__init__.py +0 -0
  63. vllm/compilation/activation_quant_fusion.py +209 -0
  64. vllm/compilation/backends.py +759 -0
  65. vllm/compilation/base_static_graph.py +57 -0
  66. vllm/compilation/caching.py +178 -0
  67. vllm/compilation/collective_fusion.py +1234 -0
  68. vllm/compilation/compiler_interface.py +639 -0
  69. vllm/compilation/counter.py +48 -0
  70. vllm/compilation/cuda_graph.py +208 -0
  71. vllm/compilation/decorators.py +571 -0
  72. vllm/compilation/fix_functionalization.py +253 -0
  73. vllm/compilation/fusion.py +374 -0
  74. vllm/compilation/fusion_attn.py +359 -0
  75. vllm/compilation/fx_utils.py +91 -0
  76. vllm/compilation/inductor_pass.py +133 -0
  77. vllm/compilation/matcher_utils.py +317 -0
  78. vllm/compilation/monitor.py +62 -0
  79. vllm/compilation/noop_elimination.py +134 -0
  80. vllm/compilation/partition_rules.py +72 -0
  81. vllm/compilation/pass_manager.py +135 -0
  82. vllm/compilation/piecewise_backend.py +121 -0
  83. vllm/compilation/post_cleanup.py +21 -0
  84. vllm/compilation/qk_norm_rope_fusion.py +238 -0
  85. vllm/compilation/sequence_parallelism.py +363 -0
  86. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  87. vllm/compilation/vllm_inductor_pass.py +173 -0
  88. vllm/compilation/wrapper.py +238 -0
  89. vllm/config/__init__.py +102 -0
  90. vllm/config/cache.py +207 -0
  91. vllm/config/compilation.py +975 -0
  92. vllm/config/device.py +75 -0
  93. vllm/config/ec_transfer.py +110 -0
  94. vllm/config/kv_events.py +56 -0
  95. vllm/config/kv_transfer.py +114 -0
  96. vllm/config/load.py +124 -0
  97. vllm/config/lora.py +112 -0
  98. vllm/config/model.py +2162 -0
  99. vllm/config/multimodal.py +248 -0
  100. vllm/config/observability.py +123 -0
  101. vllm/config/parallel.py +655 -0
  102. vllm/config/pooler.py +122 -0
  103. vllm/config/scheduler.py +298 -0
  104. vllm/config/speculative.py +654 -0
  105. vllm/config/speech_to_text.py +38 -0
  106. vllm/config/structured_outputs.py +92 -0
  107. vllm/config/utils.py +178 -0
  108. vllm/config/vllm.py +1166 -0
  109. vllm/connections.py +189 -0
  110. vllm/device_allocator/__init__.py +0 -0
  111. vllm/device_allocator/cumem.py +327 -0
  112. vllm/distributed/__init__.py +6 -0
  113. vllm/distributed/communication_op.py +43 -0
  114. vllm/distributed/device_communicators/__init__.py +0 -0
  115. vllm/distributed/device_communicators/all2all.py +490 -0
  116. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  117. vllm/distributed/device_communicators/base_device_communicator.py +297 -0
  118. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  119. vllm/distributed/device_communicators/cuda_communicator.py +340 -0
  120. vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
  121. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  122. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  123. vllm/distributed/device_communicators/pynccl.py +386 -0
  124. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  125. vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
  126. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  127. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  128. vllm/distributed/device_communicators/shm_broadcast.py +733 -0
  129. vllm/distributed/device_communicators/shm_object_storage.py +660 -0
  130. vllm/distributed/device_communicators/symm_mem.py +156 -0
  131. vllm/distributed/device_communicators/tpu_communicator.py +107 -0
  132. vllm/distributed/device_communicators/xpu_communicator.py +95 -0
  133. vllm/distributed/ec_transfer/__init__.py +14 -0
  134. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  135. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  136. vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
  137. vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
  138. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  139. vllm/distributed/eplb/__init__.py +8 -0
  140. vllm/distributed/eplb/eplb_state.py +837 -0
  141. vllm/distributed/eplb/rebalance_algo.py +260 -0
  142. vllm/distributed/eplb/rebalance_execute.py +431 -0
  143. vllm/distributed/kv_events.py +371 -0
  144. vllm/distributed/kv_transfer/README.md +29 -0
  145. vllm/distributed/kv_transfer/__init__.py +20 -0
  146. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  147. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  149. vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
  150. vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
  159. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
  160. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
  161. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
  162. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
  163. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
  164. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  165. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  166. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  167. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  168. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
  169. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  170. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
  171. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
  172. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
  173. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  174. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  175. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
  176. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
  177. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  178. vllm/distributed/parallel_state.py +1759 -0
  179. vllm/distributed/tpu_distributed_utils.py +188 -0
  180. vllm/distributed/utils.py +543 -0
  181. vllm/engine/__init__.py +0 -0
  182. vllm/engine/arg_utils.py +2144 -0
  183. vllm/engine/async_llm_engine.py +6 -0
  184. vllm/engine/llm_engine.py +6 -0
  185. vllm/engine/protocol.py +170 -0
  186. vllm/entrypoints/__init__.py +0 -0
  187. vllm/entrypoints/anthropic/__init__.py +0 -0
  188. vllm/entrypoints/anthropic/protocol.py +162 -0
  189. vllm/entrypoints/anthropic/serving_messages.py +460 -0
  190. vllm/entrypoints/api_server.py +184 -0
  191. vllm/entrypoints/chat_utils.py +1690 -0
  192. vllm/entrypoints/cli/__init__.py +13 -0
  193. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  194. vllm/entrypoints/cli/benchmark/base.py +25 -0
  195. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  196. vllm/entrypoints/cli/benchmark/main.py +56 -0
  197. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  198. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  199. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  200. vllm/entrypoints/cli/collect_env.py +38 -0
  201. vllm/entrypoints/cli/main.py +79 -0
  202. vllm/entrypoints/cli/openai.py +256 -0
  203. vllm/entrypoints/cli/run_batch.py +68 -0
  204. vllm/entrypoints/cli/serve.py +249 -0
  205. vllm/entrypoints/cli/types.py +29 -0
  206. vllm/entrypoints/constants.py +10 -0
  207. vllm/entrypoints/context.py +572 -0
  208. vllm/entrypoints/dynamic_lora.py +57 -0
  209. vllm/entrypoints/harmony_utils.py +535 -0
  210. vllm/entrypoints/launcher.py +175 -0
  211. vllm/entrypoints/llm.py +1768 -0
  212. vllm/entrypoints/logger.py +84 -0
  213. vllm/entrypoints/openai/__init__.py +0 -0
  214. vllm/entrypoints/openai/api_server.py +2096 -0
  215. vllm/entrypoints/openai/cli_args.py +302 -0
  216. vllm/entrypoints/openai/orca_metrics.py +120 -0
  217. vllm/entrypoints/openai/protocol.py +3299 -0
  218. vllm/entrypoints/openai/run_batch.py +547 -0
  219. vllm/entrypoints/openai/serving_chat.py +1772 -0
  220. vllm/entrypoints/openai/serving_classification.py +235 -0
  221. vllm/entrypoints/openai/serving_completion.py +715 -0
  222. vllm/entrypoints/openai/serving_embedding.py +695 -0
  223. vllm/entrypoints/openai/serving_engine.py +1433 -0
  224. vllm/entrypoints/openai/serving_models.py +304 -0
  225. vllm/entrypoints/openai/serving_pooling.py +346 -0
  226. vllm/entrypoints/openai/serving_responses.py +2021 -0
  227. vllm/entrypoints/openai/serving_score.py +503 -0
  228. vllm/entrypoints/openai/serving_tokenization.py +203 -0
  229. vllm/entrypoints/openai/serving_tokens.py +269 -0
  230. vllm/entrypoints/openai/serving_transcription.py +148 -0
  231. vllm/entrypoints/openai/speech_to_text.py +405 -0
  232. vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
  233. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
  234. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
  235. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
  236. vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
  237. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
  238. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  239. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
  240. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
  241. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  242. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
  243. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
  244. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
  245. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  246. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
  247. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
  248. vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
  249. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
  250. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
  251. vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
  252. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
  253. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
  254. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
  255. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
  256. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  257. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
  258. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
  259. vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
  260. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
  261. vllm/entrypoints/renderer.py +409 -0
  262. vllm/entrypoints/responses_utils.py +77 -0
  263. vllm/entrypoints/sagemaker/__init__.py +4 -0
  264. vllm/entrypoints/sagemaker/routes.py +72 -0
  265. vllm/entrypoints/score_utils.py +242 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +143 -0
  268. vllm/entrypoints/tool_server.py +209 -0
  269. vllm/entrypoints/utils.py +319 -0
  270. vllm/env_override.py +378 -0
  271. vllm/envs.py +1659 -0
  272. vllm/forward_context.py +356 -0
  273. vllm/inputs/__init__.py +44 -0
  274. vllm/inputs/data.py +359 -0
  275. vllm/inputs/parse.py +137 -0
  276. vllm/inputs/preprocess.py +727 -0
  277. vllm/logger.py +267 -0
  278. vllm/logging_utils/__init__.py +10 -0
  279. vllm/logging_utils/dump_input.py +83 -0
  280. vllm/logging_utils/formatter.py +77 -0
  281. vllm/logging_utils/log_time.py +34 -0
  282. vllm/logits_process.py +121 -0
  283. vllm/logprobs.py +208 -0
  284. vllm/lora/__init__.py +0 -0
  285. vllm/lora/layers/__init__.py +41 -0
  286. vllm/lora/layers/base.py +67 -0
  287. vllm/lora/layers/base_linear.py +164 -0
  288. vllm/lora/layers/column_parallel_linear.py +578 -0
  289. vllm/lora/layers/fused_moe.py +472 -0
  290. vllm/lora/layers/logits_processor.py +252 -0
  291. vllm/lora/layers/replicated_linear.py +70 -0
  292. vllm/lora/layers/row_parallel_linear.py +181 -0
  293. vllm/lora/layers/utils.py +65 -0
  294. vllm/lora/layers/vocal_parallel_embedding.py +166 -0
  295. vllm/lora/lora_weights.py +198 -0
  296. vllm/lora/models.py +890 -0
  297. vllm/lora/ops/__init__.py +0 -0
  298. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  299. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  300. vllm/lora/ops/torch_ops/__init__.py +20 -0
  301. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  302. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  303. vllm/lora/ops/triton_ops/__init__.py +21 -0
  304. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
  305. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  306. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  307. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  308. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  309. vllm/lora/ops/triton_ops/utils.py +295 -0
  310. vllm/lora/ops/xla_ops/__init__.py +6 -0
  311. vllm/lora/ops/xla_ops/lora_ops.py +141 -0
  312. vllm/lora/peft_helper.py +128 -0
  313. vllm/lora/punica_wrapper/__init__.py +10 -0
  314. vllm/lora/punica_wrapper/punica_base.py +492 -0
  315. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  316. vllm/lora/punica_wrapper/punica_gpu.py +411 -0
  317. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  318. vllm/lora/punica_wrapper/punica_tpu.py +359 -0
  319. vllm/lora/punica_wrapper/punica_xpu.py +279 -0
  320. vllm/lora/punica_wrapper/utils.py +150 -0
  321. vllm/lora/request.py +100 -0
  322. vllm/lora/resolver.py +88 -0
  323. vllm/lora/utils.py +293 -0
  324. vllm/lora/worker_manager.py +279 -0
  325. vllm/model_executor/__init__.py +11 -0
  326. vllm/model_executor/custom_op.py +194 -0
  327. vllm/model_executor/layers/__init__.py +0 -0
  328. vllm/model_executor/layers/activation.py +569 -0
  329. vllm/model_executor/layers/attention_layer_base.py +35 -0
  330. vllm/model_executor/layers/batch_invariant.py +854 -0
  331. vllm/model_executor/layers/conv.py +236 -0
  332. vllm/model_executor/layers/fla/__init__.py +8 -0
  333. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  334. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  335. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  336. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  337. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  338. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  339. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  340. vllm/model_executor/layers/fla/ops/index.py +41 -0
  341. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  342. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  343. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  344. vllm/model_executor/layers/fla/ops/op.py +60 -0
  345. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  346. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  347. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  348. vllm/model_executor/layers/fused_moe/__init__.py +106 -0
  349. vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
  350. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
  351. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
  352. vllm/model_executor/layers/fused_moe/config.py +916 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  625. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
  626. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
  627. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
  628. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
  629. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  630. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
  631. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
  632. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
  633. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  634. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
  635. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
  636. vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
  637. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
  638. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
  639. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
  640. vllm/model_executor/layers/fused_moe/layer.py +1944 -0
  641. vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
  642. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
  643. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  644. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  645. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  646. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  647. vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
  648. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
  649. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  650. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
  651. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  652. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
  653. vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
  654. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
  655. vllm/model_executor/layers/fused_moe/utils.py +332 -0
  656. vllm/model_executor/layers/kda.py +448 -0
  657. vllm/model_executor/layers/layernorm.py +442 -0
  658. vllm/model_executor/layers/lightning_attn.py +729 -0
  659. vllm/model_executor/layers/linear.py +1424 -0
  660. vllm/model_executor/layers/logits_processor.py +106 -0
  661. vllm/model_executor/layers/mamba/__init__.py +0 -0
  662. vllm/model_executor/layers/mamba/abstract.py +71 -0
  663. vllm/model_executor/layers/mamba/linear_attn.py +402 -0
  664. vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
  665. vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
  666. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  667. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  668. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  669. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  670. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
  671. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  672. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  673. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  674. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  675. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  676. vllm/model_executor/layers/mamba/short_conv.py +264 -0
  677. vllm/model_executor/layers/mla.py +168 -0
  678. vllm/model_executor/layers/pooler.py +817 -0
  679. vllm/model_executor/layers/quantization/__init__.py +174 -0
  680. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  681. vllm/model_executor/layers/quantization/awq.py +277 -0
  682. vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
  683. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  684. vllm/model_executor/layers/quantization/base_config.py +170 -0
  685. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  686. vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
  687. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  688. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
  689. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
  690. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
  691. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  692. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  693. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  694. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  695. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  696. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
  697. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  698. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  699. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
  700. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  701. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
  702. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  703. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  704. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  705. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  706. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  707. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  708. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  709. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  710. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  711. vllm/model_executor/layers/quantization/experts_int8.py +240 -0
  712. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  713. vllm/model_executor/layers/quantization/fp8.py +1333 -0
  714. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  715. vllm/model_executor/layers/quantization/gguf.py +643 -0
  716. vllm/model_executor/layers/quantization/gptq.py +393 -0
  717. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  718. vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
  719. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  720. vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
  721. vllm/model_executor/layers/quantization/inc.py +65 -0
  722. vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
  723. vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
  724. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  725. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  726. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
  727. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  728. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  729. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  730. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
  731. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  732. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
  733. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  734. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
  735. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
  736. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
  737. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  738. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
  739. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
  740. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
  741. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  742. vllm/model_executor/layers/quantization/kv_cache.py +146 -0
  743. vllm/model_executor/layers/quantization/modelopt.py +1788 -0
  744. vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
  745. vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
  746. vllm/model_executor/layers/quantization/petit.py +320 -0
  747. vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
  748. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  749. vllm/model_executor/layers/quantization/quark/quark.py +528 -0
  750. vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
  751. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  752. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
  753. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  754. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  755. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  756. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  757. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  758. vllm/model_executor/layers/quantization/rtn.py +652 -0
  759. vllm/model_executor/layers/quantization/schema.py +90 -0
  760. vllm/model_executor/layers/quantization/torchao.py +380 -0
  761. vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
  762. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  763. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  764. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  976. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
  977. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
  978. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
  979. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  980. vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
  981. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  982. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  983. vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
  984. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
  985. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
  986. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
  987. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  988. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
  989. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  990. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  991. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  992. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
  993. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  994. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  995. vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
  996. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
  997. vllm/model_executor/layers/resampler.py +283 -0
  998. vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
  999. vllm/model_executor/layers/rotary_embedding/base.py +235 -0
  1000. vllm/model_executor/layers/rotary_embedding/common.py +188 -0
  1001. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
  1002. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
  1003. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1004. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1005. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
  1006. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1007. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1008. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
  1009. vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
  1010. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1011. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1012. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
  1013. vllm/model_executor/layers/utils.py +251 -0
  1014. vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
  1015. vllm/model_executor/model_loader/__init__.py +148 -0
  1016. vllm/model_executor/model_loader/base_loader.py +57 -0
  1017. vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
  1018. vllm/model_executor/model_loader/default_loader.py +327 -0
  1019. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1020. vllm/model_executor/model_loader/gguf_loader.py +176 -0
  1021. vllm/model_executor/model_loader/online_quantization.py +224 -0
  1022. vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
  1023. vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
  1024. vllm/model_executor/model_loader/tensorizer.py +790 -0
  1025. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1026. vllm/model_executor/model_loader/tpu.py +118 -0
  1027. vllm/model_executor/model_loader/utils.py +288 -0
  1028. vllm/model_executor/model_loader/weight_utils.py +1084 -0
  1029. vllm/model_executor/models/__init__.py +44 -0
  1030. vllm/model_executor/models/adapters.py +543 -0
  1031. vllm/model_executor/models/afmoe.py +711 -0
  1032. vllm/model_executor/models/aimv2.py +247 -0
  1033. vllm/model_executor/models/apertus.py +587 -0
  1034. vllm/model_executor/models/arcee.py +439 -0
  1035. vllm/model_executor/models/arctic.py +635 -0
  1036. vllm/model_executor/models/aria.py +655 -0
  1037. vllm/model_executor/models/aya_vision.py +450 -0
  1038. vllm/model_executor/models/baichuan.py +496 -0
  1039. vllm/model_executor/models/bailing_moe.py +646 -0
  1040. vllm/model_executor/models/bamba.py +522 -0
  1041. vllm/model_executor/models/bee.py +157 -0
  1042. vllm/model_executor/models/bert.py +925 -0
  1043. vllm/model_executor/models/bert_with_rope.py +732 -0
  1044. vllm/model_executor/models/blip.py +349 -0
  1045. vllm/model_executor/models/blip2.py +695 -0
  1046. vllm/model_executor/models/bloom.py +390 -0
  1047. vllm/model_executor/models/chameleon.py +1120 -0
  1048. vllm/model_executor/models/chatglm.py +498 -0
  1049. vllm/model_executor/models/clip.py +965 -0
  1050. vllm/model_executor/models/cohere2_vision.py +472 -0
  1051. vllm/model_executor/models/commandr.py +473 -0
  1052. vllm/model_executor/models/config.py +503 -0
  1053. vllm/model_executor/models/dbrx.py +482 -0
  1054. vllm/model_executor/models/deepencoder.py +673 -0
  1055. vllm/model_executor/models/deepseek_eagle.py +260 -0
  1056. vllm/model_executor/models/deepseek_mtp.py +360 -0
  1057. vllm/model_executor/models/deepseek_ocr.py +593 -0
  1058. vllm/model_executor/models/deepseek_v2.py +1649 -0
  1059. vllm/model_executor/models/deepseek_vl2.py +655 -0
  1060. vllm/model_executor/models/dots1.py +574 -0
  1061. vllm/model_executor/models/dots_ocr.py +900 -0
  1062. vllm/model_executor/models/ernie45.py +53 -0
  1063. vllm/model_executor/models/ernie45_moe.py +759 -0
  1064. vllm/model_executor/models/ernie45_vl.py +1742 -0
  1065. vllm/model_executor/models/ernie45_vl_moe.py +803 -0
  1066. vllm/model_executor/models/ernie_mtp.py +279 -0
  1067. vllm/model_executor/models/exaone.py +545 -0
  1068. vllm/model_executor/models/exaone4.py +531 -0
  1069. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1070. vllm/model_executor/models/falcon.py +545 -0
  1071. vllm/model_executor/models/falcon_h1.py +685 -0
  1072. vllm/model_executor/models/flex_olmo.py +155 -0
  1073. vllm/model_executor/models/fuyu.py +373 -0
  1074. vllm/model_executor/models/gemma.py +426 -0
  1075. vllm/model_executor/models/gemma2.py +439 -0
  1076. vllm/model_executor/models/gemma3.py +571 -0
  1077. vllm/model_executor/models/gemma3_mm.py +741 -0
  1078. vllm/model_executor/models/gemma3n.py +1165 -0
  1079. vllm/model_executor/models/gemma3n_mm.py +811 -0
  1080. vllm/model_executor/models/glm.py +23 -0
  1081. vllm/model_executor/models/glm4.py +305 -0
  1082. vllm/model_executor/models/glm4_1v.py +1821 -0
  1083. vllm/model_executor/models/glm4_moe.py +747 -0
  1084. vllm/model_executor/models/glm4_moe_mtp.py +359 -0
  1085. vllm/model_executor/models/glm4v.py +784 -0
  1086. vllm/model_executor/models/gpt2.py +397 -0
  1087. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1088. vllm/model_executor/models/gpt_j.py +346 -0
  1089. vllm/model_executor/models/gpt_neox.py +344 -0
  1090. vllm/model_executor/models/gpt_oss.py +738 -0
  1091. vllm/model_executor/models/granite.py +516 -0
  1092. vllm/model_executor/models/granite_speech.py +913 -0
  1093. vllm/model_executor/models/granitemoe.py +569 -0
  1094. vllm/model_executor/models/granitemoehybrid.py +709 -0
  1095. vllm/model_executor/models/granitemoeshared.py +333 -0
  1096. vllm/model_executor/models/gritlm.py +245 -0
  1097. vllm/model_executor/models/grok1.py +558 -0
  1098. vllm/model_executor/models/h2ovl.py +554 -0
  1099. vllm/model_executor/models/hunyuan_v1.py +1053 -0
  1100. vllm/model_executor/models/hyperclovax_vision.py +1166 -0
  1101. vllm/model_executor/models/idefics2_vision_model.py +426 -0
  1102. vllm/model_executor/models/idefics3.py +717 -0
  1103. vllm/model_executor/models/interfaces.py +1092 -0
  1104. vllm/model_executor/models/interfaces_base.py +214 -0
  1105. vllm/model_executor/models/intern_vit.py +453 -0
  1106. vllm/model_executor/models/internlm2.py +460 -0
  1107. vllm/model_executor/models/internlm2_ve.py +142 -0
  1108. vllm/model_executor/models/interns1.py +830 -0
  1109. vllm/model_executor/models/interns1_vit.py +432 -0
  1110. vllm/model_executor/models/internvl.py +1452 -0
  1111. vllm/model_executor/models/jais.py +397 -0
  1112. vllm/model_executor/models/jamba.py +610 -0
  1113. vllm/model_executor/models/jina_vl.py +147 -0
  1114. vllm/model_executor/models/keye.py +1761 -0
  1115. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1116. vllm/model_executor/models/kimi_linear.py +663 -0
  1117. vllm/model_executor/models/kimi_vl.py +578 -0
  1118. vllm/model_executor/models/lfm2.py +532 -0
  1119. vllm/model_executor/models/lfm2_moe.py +762 -0
  1120. vllm/model_executor/models/lightonocr.py +195 -0
  1121. vllm/model_executor/models/llama.py +732 -0
  1122. vllm/model_executor/models/llama4.py +859 -0
  1123. vllm/model_executor/models/llama4_eagle.py +223 -0
  1124. vllm/model_executor/models/llama_eagle.py +218 -0
  1125. vllm/model_executor/models/llama_eagle3.py +367 -0
  1126. vllm/model_executor/models/llava.py +842 -0
  1127. vllm/model_executor/models/llava_next.py +583 -0
  1128. vllm/model_executor/models/llava_next_video.py +467 -0
  1129. vllm/model_executor/models/llava_onevision.py +923 -0
  1130. vllm/model_executor/models/longcat_flash.py +749 -0
  1131. vllm/model_executor/models/longcat_flash_mtp.py +349 -0
  1132. vllm/model_executor/models/mamba.py +276 -0
  1133. vllm/model_executor/models/mamba2.py +289 -0
  1134. vllm/model_executor/models/medusa.py +179 -0
  1135. vllm/model_executor/models/midashenglm.py +827 -0
  1136. vllm/model_executor/models/mimo.py +188 -0
  1137. vllm/model_executor/models/mimo_mtp.py +294 -0
  1138. vllm/model_executor/models/minicpm.py +664 -0
  1139. vllm/model_executor/models/minicpm3.py +242 -0
  1140. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1141. vllm/model_executor/models/minicpmo.py +768 -0
  1142. vllm/model_executor/models/minicpmv.py +1745 -0
  1143. vllm/model_executor/models/minimax_m2.py +552 -0
  1144. vllm/model_executor/models/minimax_text_01.py +1012 -0
  1145. vllm/model_executor/models/minimax_vl_01.py +396 -0
  1146. vllm/model_executor/models/mistral3.py +637 -0
  1147. vllm/model_executor/models/mixtral.py +621 -0
  1148. vllm/model_executor/models/mllama4.py +1147 -0
  1149. vllm/model_executor/models/mlp_speculator.py +235 -0
  1150. vllm/model_executor/models/modernbert.py +450 -0
  1151. vllm/model_executor/models/module_mapping.py +74 -0
  1152. vllm/model_executor/models/molmo.py +1555 -0
  1153. vllm/model_executor/models/moonvit.py +677 -0
  1154. vllm/model_executor/models/mpt.py +335 -0
  1155. vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
  1156. vllm/model_executor/models/nemotron.py +518 -0
  1157. vllm/model_executor/models/nemotron_h.py +852 -0
  1158. vllm/model_executor/models/nemotron_nas.py +491 -0
  1159. vllm/model_executor/models/nemotron_vl.py +653 -0
  1160. vllm/model_executor/models/nvlm_d.py +216 -0
  1161. vllm/model_executor/models/olmo.py +414 -0
  1162. vllm/model_executor/models/olmo2.py +454 -0
  1163. vllm/model_executor/models/olmoe.py +498 -0
  1164. vllm/model_executor/models/openpangu.py +1062 -0
  1165. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1166. vllm/model_executor/models/opt.py +426 -0
  1167. vllm/model_executor/models/orion.py +372 -0
  1168. vllm/model_executor/models/ouro.py +516 -0
  1169. vllm/model_executor/models/ovis.py +559 -0
  1170. vllm/model_executor/models/ovis2_5.py +673 -0
  1171. vllm/model_executor/models/paddleocr_vl.py +1407 -0
  1172. vllm/model_executor/models/paligemma.py +412 -0
  1173. vllm/model_executor/models/persimmon.py +377 -0
  1174. vllm/model_executor/models/phi.py +374 -0
  1175. vllm/model_executor/models/phi3.py +18 -0
  1176. vllm/model_executor/models/phi3v.py +737 -0
  1177. vllm/model_executor/models/phi4_multimodal.py +1447 -0
  1178. vllm/model_executor/models/phi4mm.py +1253 -0
  1179. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1180. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1181. vllm/model_executor/models/phimoe.py +675 -0
  1182. vllm/model_executor/models/pixtral.py +1352 -0
  1183. vllm/model_executor/models/plamo2.py +981 -0
  1184. vllm/model_executor/models/qwen.py +368 -0
  1185. vllm/model_executor/models/qwen2.py +541 -0
  1186. vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
  1187. vllm/model_executor/models/qwen2_5_vl.py +1613 -0
  1188. vllm/model_executor/models/qwen2_audio.py +473 -0
  1189. vllm/model_executor/models/qwen2_moe.py +596 -0
  1190. vllm/model_executor/models/qwen2_rm.py +123 -0
  1191. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1192. vllm/model_executor/models/qwen3.py +336 -0
  1193. vllm/model_executor/models/qwen3_moe.py +744 -0
  1194. vllm/model_executor/models/qwen3_next.py +1395 -0
  1195. vllm/model_executor/models/qwen3_next_mtp.py +296 -0
  1196. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
  1197. vllm/model_executor/models/qwen3_vl.py +1673 -0
  1198. vllm/model_executor/models/qwen3_vl_moe.py +415 -0
  1199. vllm/model_executor/models/qwen_vl.py +802 -0
  1200. vllm/model_executor/models/radio.py +555 -0
  1201. vllm/model_executor/models/registry.py +1155 -0
  1202. vllm/model_executor/models/roberta.py +259 -0
  1203. vllm/model_executor/models/rvl.py +107 -0
  1204. vllm/model_executor/models/seed_oss.py +497 -0
  1205. vllm/model_executor/models/siglip.py +1174 -0
  1206. vllm/model_executor/models/siglip2navit.py +724 -0
  1207. vllm/model_executor/models/skyworkr1v.py +953 -0
  1208. vllm/model_executor/models/smolvlm.py +38 -0
  1209. vllm/model_executor/models/solar.py +502 -0
  1210. vllm/model_executor/models/stablelm.py +359 -0
  1211. vllm/model_executor/models/starcoder2.py +367 -0
  1212. vllm/model_executor/models/step3_text.py +559 -0
  1213. vllm/model_executor/models/step3_vl.py +1148 -0
  1214. vllm/model_executor/models/swin.py +514 -0
  1215. vllm/model_executor/models/tarsier.py +619 -0
  1216. vllm/model_executor/models/telechat2.py +153 -0
  1217. vllm/model_executor/models/teleflm.py +78 -0
  1218. vllm/model_executor/models/terratorch.py +319 -0
  1219. vllm/model_executor/models/transformers/__init__.py +127 -0
  1220. vllm/model_executor/models/transformers/base.py +464 -0
  1221. vllm/model_executor/models/transformers/causal.py +65 -0
  1222. vllm/model_executor/models/transformers/legacy.py +90 -0
  1223. vllm/model_executor/models/transformers/moe.py +318 -0
  1224. vllm/model_executor/models/transformers/multimodal.py +411 -0
  1225. vllm/model_executor/models/transformers/pooling.py +119 -0
  1226. vllm/model_executor/models/transformers/utils.py +207 -0
  1227. vllm/model_executor/models/ultravox.py +681 -0
  1228. vllm/model_executor/models/utils.py +877 -0
  1229. vllm/model_executor/models/vision.py +552 -0
  1230. vllm/model_executor/models/voxtral.py +845 -0
  1231. vllm/model_executor/models/whisper.py +959 -0
  1232. vllm/model_executor/models/zamba2.py +986 -0
  1233. vllm/model_executor/parameter.py +642 -0
  1234. vllm/model_executor/utils.py +94 -0
  1235. vllm/model_executor/warmup/__init__.py +0 -0
  1236. vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
  1237. vllm/model_executor/warmup/kernel_warmup.py +98 -0
  1238. vllm/multimodal/__init__.py +40 -0
  1239. vllm/multimodal/audio.py +118 -0
  1240. vllm/multimodal/base.py +26 -0
  1241. vllm/multimodal/cache.py +755 -0
  1242. vllm/multimodal/evs.py +294 -0
  1243. vllm/multimodal/hasher.py +106 -0
  1244. vllm/multimodal/image.py +130 -0
  1245. vllm/multimodal/inputs.py +1036 -0
  1246. vllm/multimodal/parse.py +544 -0
  1247. vllm/multimodal/processing.py +2186 -0
  1248. vllm/multimodal/profiling.py +369 -0
  1249. vllm/multimodal/registry.py +360 -0
  1250. vllm/multimodal/utils.py +512 -0
  1251. vllm/multimodal/video.py +306 -0
  1252. vllm/outputs.py +345 -0
  1253. vllm/platforms/__init__.py +277 -0
  1254. vllm/platforms/cpu.py +414 -0
  1255. vllm/platforms/cuda.py +657 -0
  1256. vllm/platforms/interface.py +639 -0
  1257. vllm/platforms/rocm.py +466 -0
  1258. vllm/platforms/tpu.py +276 -0
  1259. vllm/platforms/xpu.py +274 -0
  1260. vllm/plugins/__init__.py +78 -0
  1261. vllm/plugins/io_processors/__init__.py +68 -0
  1262. vllm/plugins/io_processors/interface.py +77 -0
  1263. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1264. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1265. vllm/pooling_params.py +228 -0
  1266. vllm/profiler/__init__.py +0 -0
  1267. vllm/profiler/gpu_profiler.py +37 -0
  1268. vllm/profiler/layerwise_profile.py +392 -0
  1269. vllm/profiler/utils.py +151 -0
  1270. vllm/py.typed +2 -0
  1271. vllm/ray/__init__.py +0 -0
  1272. vllm/ray/lazy_utils.py +26 -0
  1273. vllm/ray/ray_env.py +79 -0
  1274. vllm/reasoning/__init__.py +92 -0
  1275. vllm/reasoning/abs_reasoning_parsers.py +290 -0
  1276. vllm/reasoning/basic_parsers.py +162 -0
  1277. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1278. vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
  1279. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1280. vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
  1281. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1282. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1283. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1284. vllm/reasoning/identity_reasoning_parser.py +58 -0
  1285. vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
  1286. vllm/reasoning/mistral_reasoning_parser.py +55 -0
  1287. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1288. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1289. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1290. vllm/reasoning/step3_reasoning_parser.py +107 -0
  1291. vllm/sampling_params.py +669 -0
  1292. vllm/scalar_type.py +355 -0
  1293. vllm/scripts.py +17 -0
  1294. vllm/sequence.py +98 -0
  1295. vllm/tasks.py +13 -0
  1296. vllm/third_party/__init__.py +0 -0
  1297. vllm/third_party/pynvml.py +6140 -0
  1298. vllm/tracing.py +135 -0
  1299. vllm/transformers_utils/__init__.py +26 -0
  1300. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1301. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1302. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1303. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1304. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1305. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1306. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1307. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1308. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1309. vllm/transformers_utils/config.py +1203 -0
  1310. vllm/transformers_utils/config_parser_base.py +20 -0
  1311. vllm/transformers_utils/configs/__init__.py +70 -0
  1312. vllm/transformers_utils/configs/afmoe.py +84 -0
  1313. vllm/transformers_utils/configs/arctic.py +206 -0
  1314. vllm/transformers_utils/configs/chatglm.py +75 -0
  1315. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1316. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1317. vllm/transformers_utils/configs/eagle.py +84 -0
  1318. vllm/transformers_utils/configs/falcon.py +89 -0
  1319. vllm/transformers_utils/configs/flex_olmo.py +77 -0
  1320. vllm/transformers_utils/configs/jais.py +243 -0
  1321. vllm/transformers_utils/configs/kimi_linear.py +144 -0
  1322. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1323. vllm/transformers_utils/configs/lfm2_moe.py +159 -0
  1324. vllm/transformers_utils/configs/medusa.py +65 -0
  1325. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1326. vllm/transformers_utils/configs/mistral.py +174 -0
  1327. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1328. vllm/transformers_utils/configs/moonvit.py +33 -0
  1329. vllm/transformers_utils/configs/nemotron.py +212 -0
  1330. vllm/transformers_utils/configs/nemotron_h.py +282 -0
  1331. vllm/transformers_utils/configs/olmo3.py +79 -0
  1332. vllm/transformers_utils/configs/ovis.py +182 -0
  1333. vllm/transformers_utils/configs/qwen3_next.py +274 -0
  1334. vllm/transformers_utils/configs/radio.py +89 -0
  1335. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1336. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1337. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1338. vllm/transformers_utils/configs/step3_vl.py +174 -0
  1339. vllm/transformers_utils/configs/ultravox.py +118 -0
  1340. vllm/transformers_utils/detokenizer_utils.py +198 -0
  1341. vllm/transformers_utils/dynamic_module.py +59 -0
  1342. vllm/transformers_utils/processor.py +402 -0
  1343. vllm/transformers_utils/processors/__init__.py +15 -0
  1344. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1345. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1346. vllm/transformers_utils/processors/ovis.py +453 -0
  1347. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1348. vllm/transformers_utils/runai_utils.py +104 -0
  1349. vllm/transformers_utils/s3_utils.py +95 -0
  1350. vllm/transformers_utils/tokenizer.py +293 -0
  1351. vllm/transformers_utils/tokenizer_base.py +155 -0
  1352. vllm/transformers_utils/tokenizers/__init__.py +16 -0
  1353. vllm/transformers_utils/tokenizers/mistral.py +502 -0
  1354. vllm/transformers_utils/utils.py +130 -0
  1355. vllm/triton_utils/__init__.py +19 -0
  1356. vllm/triton_utils/importing.py +103 -0
  1357. vllm/usage/__init__.py +0 -0
  1358. vllm/usage/usage_lib.py +294 -0
  1359. vllm/utils/__init__.py +82 -0
  1360. vllm/utils/argparse_utils.py +487 -0
  1361. vllm/utils/async_utils.py +303 -0
  1362. vllm/utils/cache.py +214 -0
  1363. vllm/utils/collection_utils.py +139 -0
  1364. vllm/utils/counter.py +45 -0
  1365. vllm/utils/deep_gemm.py +391 -0
  1366. vllm/utils/flashinfer.py +490 -0
  1367. vllm/utils/func_utils.py +236 -0
  1368. vllm/utils/gc_utils.py +147 -0
  1369. vllm/utils/hashing.py +63 -0
  1370. vllm/utils/import_utils.py +411 -0
  1371. vllm/utils/jsontree.py +165 -0
  1372. vllm/utils/math_utils.py +32 -0
  1373. vllm/utils/mem_constants.py +13 -0
  1374. vllm/utils/mem_utils.py +232 -0
  1375. vllm/utils/nccl.py +64 -0
  1376. vllm/utils/network_utils.py +331 -0
  1377. vllm/utils/platform_utils.py +59 -0
  1378. vllm/utils/profiling.py +56 -0
  1379. vllm/utils/registry.py +49 -0
  1380. vllm/utils/serial_utils.py +169 -0
  1381. vllm/utils/system_utils.py +229 -0
  1382. vllm/utils/tensor_schema.py +255 -0
  1383. vllm/utils/torch_utils.py +657 -0
  1384. vllm/v1/__init__.py +0 -0
  1385. vllm/v1/attention/__init__.py +0 -0
  1386. vllm/v1/attention/backends/__init__.py +0 -0
  1387. vllm/v1/attention/backends/cpu_attn.py +496 -0
  1388. vllm/v1/attention/backends/flash_attn.py +1028 -0
  1389. vllm/v1/attention/backends/flashinfer.py +1572 -0
  1390. vllm/v1/attention/backends/flex_attention.py +926 -0
  1391. vllm/v1/attention/backends/gdn_attn.py +387 -0
  1392. vllm/v1/attention/backends/linear_attn.py +74 -0
  1393. vllm/v1/attention/backends/mamba1_attn.py +165 -0
  1394. vllm/v1/attention/backends/mamba2_attn.py +354 -0
  1395. vllm/v1/attention/backends/mamba_attn.py +115 -0
  1396. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1397. vllm/v1/attention/backends/mla/common.py +2031 -0
  1398. vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
  1399. vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
  1400. vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
  1401. vllm/v1/attention/backends/mla/flashmla.py +314 -0
  1402. vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
  1403. vllm/v1/attention/backends/mla/indexer.py +362 -0
  1404. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
  1405. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1406. vllm/v1/attention/backends/pallas.py +436 -0
  1407. vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
  1408. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
  1409. vllm/v1/attention/backends/rocm_attn.py +362 -0
  1410. vllm/v1/attention/backends/short_conv_attn.py +105 -0
  1411. vllm/v1/attention/backends/tree_attn.py +425 -0
  1412. vllm/v1/attention/backends/triton_attn.py +373 -0
  1413. vllm/v1/attention/backends/utils.py +1116 -0
  1414. vllm/v1/attention/backends/xformers.py +417 -0
  1415. vllm/v1/core/__init__.py +0 -0
  1416. vllm/v1/core/block_pool.py +428 -0
  1417. vllm/v1/core/encoder_cache_manager.py +343 -0
  1418. vllm/v1/core/kv_cache_coordinator.py +480 -0
  1419. vllm/v1/core/kv_cache_manager.py +420 -0
  1420. vllm/v1/core/kv_cache_utils.py +1340 -0
  1421. vllm/v1/core/sched/__init__.py +0 -0
  1422. vllm/v1/core/sched/async_scheduler.py +62 -0
  1423. vllm/v1/core/sched/interface.py +181 -0
  1424. vllm/v1/core/sched/output.py +202 -0
  1425. vllm/v1/core/sched/request_queue.py +221 -0
  1426. vllm/v1/core/sched/scheduler.py +1617 -0
  1427. vllm/v1/core/sched/utils.py +72 -0
  1428. vllm/v1/core/single_type_kv_cache_manager.py +736 -0
  1429. vllm/v1/cudagraph_dispatcher.py +148 -0
  1430. vllm/v1/engine/__init__.py +206 -0
  1431. vllm/v1/engine/async_llm.py +797 -0
  1432. vllm/v1/engine/coordinator.py +377 -0
  1433. vllm/v1/engine/core.py +1420 -0
  1434. vllm/v1/engine/core_client.py +1400 -0
  1435. vllm/v1/engine/detokenizer.py +351 -0
  1436. vllm/v1/engine/exceptions.py +18 -0
  1437. vllm/v1/engine/llm_engine.py +408 -0
  1438. vllm/v1/engine/logprobs.py +182 -0
  1439. vllm/v1/engine/output_processor.py +642 -0
  1440. vllm/v1/engine/parallel_sampling.py +145 -0
  1441. vllm/v1/engine/processor.py +621 -0
  1442. vllm/v1/engine/utils.py +1072 -0
  1443. vllm/v1/executor/__init__.py +6 -0
  1444. vllm/v1/executor/abstract.py +352 -0
  1445. vllm/v1/executor/multiproc_executor.py +877 -0
  1446. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1447. vllm/v1/executor/ray_executor.py +626 -0
  1448. vllm/v1/executor/ray_utils.py +465 -0
  1449. vllm/v1/executor/uniproc_executor.py +183 -0
  1450. vllm/v1/kv_cache_interface.py +403 -0
  1451. vllm/v1/kv_offload/__init__.py +0 -0
  1452. vllm/v1/kv_offload/abstract.py +161 -0
  1453. vllm/v1/kv_offload/arc_manager.py +237 -0
  1454. vllm/v1/kv_offload/backend.py +97 -0
  1455. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1456. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1457. vllm/v1/kv_offload/cpu.py +93 -0
  1458. vllm/v1/kv_offload/factory.py +56 -0
  1459. vllm/v1/kv_offload/lru_manager.py +139 -0
  1460. vllm/v1/kv_offload/mediums.py +39 -0
  1461. vllm/v1/kv_offload/spec.py +62 -0
  1462. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1463. vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
  1464. vllm/v1/kv_offload/worker/worker.py +144 -0
  1465. vllm/v1/metrics/__init__.py +0 -0
  1466. vllm/v1/metrics/loggers.py +1238 -0
  1467. vllm/v1/metrics/prometheus.py +82 -0
  1468. vllm/v1/metrics/ray_wrappers.py +169 -0
  1469. vllm/v1/metrics/reader.py +257 -0
  1470. vllm/v1/metrics/stats.py +420 -0
  1471. vllm/v1/outputs.py +249 -0
  1472. vllm/v1/pool/__init__.py +0 -0
  1473. vllm/v1/pool/metadata.py +82 -0
  1474. vllm/v1/request.py +259 -0
  1475. vllm/v1/sample/__init__.py +0 -0
  1476. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1477. vllm/v1/sample/logits_processor/builtin.py +274 -0
  1478. vllm/v1/sample/logits_processor/interface.py +106 -0
  1479. vllm/v1/sample/logits_processor/state.py +165 -0
  1480. vllm/v1/sample/metadata.py +44 -0
  1481. vllm/v1/sample/ops/__init__.py +0 -0
  1482. vllm/v1/sample/ops/bad_words.py +52 -0
  1483. vllm/v1/sample/ops/logprobs.py +25 -0
  1484. vllm/v1/sample/ops/penalties.py +57 -0
  1485. vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
  1486. vllm/v1/sample/rejection_sampler.py +793 -0
  1487. vllm/v1/sample/sampler.py +316 -0
  1488. vllm/v1/sample/tpu/__init__.py +0 -0
  1489. vllm/v1/sample/tpu/metadata.py +120 -0
  1490. vllm/v1/sample/tpu/sampler.py +215 -0
  1491. vllm/v1/serial_utils.py +532 -0
  1492. vllm/v1/spec_decode/__init__.py +0 -0
  1493. vllm/v1/spec_decode/eagle.py +1225 -0
  1494. vllm/v1/spec_decode/medusa.py +73 -0
  1495. vllm/v1/spec_decode/metadata.py +66 -0
  1496. vllm/v1/spec_decode/metrics.py +224 -0
  1497. vllm/v1/spec_decode/ngram_proposer.py +291 -0
  1498. vllm/v1/spec_decode/suffix_decoding.py +103 -0
  1499. vllm/v1/spec_decode/utils.py +16 -0
  1500. vllm/v1/structured_output/__init__.py +338 -0
  1501. vllm/v1/structured_output/backend_guidance.py +265 -0
  1502. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1503. vllm/v1/structured_output/backend_outlines.py +324 -0
  1504. vllm/v1/structured_output/backend_types.py +136 -0
  1505. vllm/v1/structured_output/backend_xgrammar.py +362 -0
  1506. vllm/v1/structured_output/request.py +94 -0
  1507. vllm/v1/structured_output/utils.py +469 -0
  1508. vllm/v1/utils.py +414 -0
  1509. vllm/v1/worker/__init__.py +0 -0
  1510. vllm/v1/worker/block_table.py +327 -0
  1511. vllm/v1/worker/cpu_model_runner.py +122 -0
  1512. vllm/v1/worker/cpu_worker.py +206 -0
  1513. vllm/v1/worker/dp_utils.py +230 -0
  1514. vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
  1515. vllm/v1/worker/gpu_input_batch.py +975 -0
  1516. vllm/v1/worker/gpu_model_runner.py +5102 -0
  1517. vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
  1518. vllm/v1/worker/gpu_worker.py +894 -0
  1519. vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
  1520. vllm/v1/worker/lora_model_runner_mixin.py +213 -0
  1521. vllm/v1/worker/tpu_input_batch.py +593 -0
  1522. vllm/v1/worker/tpu_model_runner.py +2173 -0
  1523. vllm/v1/worker/tpu_worker.py +355 -0
  1524. vllm/v1/worker/ubatch_utils.py +73 -0
  1525. vllm/v1/worker/ubatching.py +231 -0
  1526. vllm/v1/worker/utils.py +366 -0
  1527. vllm/v1/worker/worker_base.py +375 -0
  1528. vllm/v1/worker/xpu_model_runner.py +55 -0
  1529. vllm/v1/worker/xpu_worker.py +189 -0
  1530. vllm/version.py +39 -0
  1531. vllm/vllm_flash_attn/.gitkeep +0 -0
  1532. vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
  1533. vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
  1534. vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
  1535. vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
  1536. vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2096 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import asyncio
4
+ import hashlib
5
+ import importlib
6
+ import inspect
7
+ import json
8
+ import multiprocessing
9
+ import multiprocessing.forkserver as forkserver
10
+ import os
11
+ import secrets
12
+ import signal
13
+ import socket
14
+ import tempfile
15
+ import uuid
16
+ from argparse import Namespace
17
+ from collections.abc import AsyncGenerator, AsyncIterator, Awaitable, Callable
18
+ from contextlib import asynccontextmanager
19
+ from http import HTTPStatus
20
+ from typing import Annotated, Any, Literal
21
+
22
+ import model_hosting_container_standards.sagemaker as sagemaker_standards
23
+ import prometheus_client
24
+ import pydantic
25
+ import regex as re
26
+ import uvloop
27
+ from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Query, Request
28
+ from fastapi.exceptions import RequestValidationError
29
+ from fastapi.middleware.cors import CORSMiddleware
30
+ from fastapi.responses import JSONResponse, Response, StreamingResponse
31
+ from prometheus_client import make_asgi_app
32
+ from prometheus_fastapi_instrumentator import Instrumentator
33
+ from starlette.concurrency import iterate_in_threadpool
34
+ from starlette.datastructures import URL, Headers, MutableHeaders, State
35
+ from starlette.routing import Mount
36
+ from starlette.types import ASGIApp, Message, Receive, Scope, Send
37
+ from typing_extensions import assert_never
38
+
39
+ import vllm.envs as envs
40
+ from vllm.config import VllmConfig
41
+ from vllm.engine.arg_utils import AsyncEngineArgs
42
+ from vllm.engine.protocol import EngineClient
43
+ from vllm.entrypoints.anthropic.protocol import (
44
+ AnthropicError,
45
+ AnthropicErrorResponse,
46
+ AnthropicMessagesRequest,
47
+ AnthropicMessagesResponse,
48
+ )
49
+ from vllm.entrypoints.anthropic.serving_messages import AnthropicServingMessages
50
+ from vllm.entrypoints.launcher import serve_http
51
+ from vllm.entrypoints.logger import RequestLogger
52
+ from vllm.entrypoints.openai.cli_args import make_arg_parser, validate_parsed_serve_args
53
+ from vllm.entrypoints.openai.orca_metrics import metrics_header
54
+ from vllm.entrypoints.openai.protocol import (
55
+ ChatCompletionRequest,
56
+ ChatCompletionResponse,
57
+ ClassificationRequest,
58
+ ClassificationResponse,
59
+ CompletionRequest,
60
+ CompletionResponse,
61
+ DetokenizeRequest,
62
+ DetokenizeResponse,
63
+ EmbeddingBytesResponse,
64
+ EmbeddingRequest,
65
+ EmbeddingResponse,
66
+ ErrorInfo,
67
+ ErrorResponse,
68
+ GenerateRequest,
69
+ GenerateResponse,
70
+ IOProcessorResponse,
71
+ PoolingBytesResponse,
72
+ PoolingRequest,
73
+ PoolingResponse,
74
+ RerankRequest,
75
+ RerankResponse,
76
+ ResponsesRequest,
77
+ ResponsesResponse,
78
+ ScoreRequest,
79
+ ScoreResponse,
80
+ StreamingResponsesResponse,
81
+ TokenizeRequest,
82
+ TokenizeResponse,
83
+ TranscriptionRequest,
84
+ TranscriptionResponse,
85
+ TranslationRequest,
86
+ TranslationResponse,
87
+ )
88
+ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
89
+ from vllm.entrypoints.openai.serving_classification import ServingClassification
90
+ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
91
+ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
92
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
93
+ from vllm.entrypoints.openai.serving_models import (
94
+ BaseModelPath,
95
+ OpenAIServingModels,
96
+ )
97
+ from vllm.entrypoints.openai.serving_pooling import OpenAIServingPooling
98
+ from vllm.entrypoints.openai.serving_responses import OpenAIServingResponses
99
+ from vllm.entrypoints.openai.serving_score import ServingScores
100
+ from vllm.entrypoints.openai.serving_tokenization import OpenAIServingTokenization
101
+ from vllm.entrypoints.openai.serving_tokens import ServingTokens
102
+ from vllm.entrypoints.openai.serving_transcription import (
103
+ OpenAIServingTranscription,
104
+ OpenAIServingTranslation,
105
+ )
106
+ from vllm.entrypoints.openai.tool_parsers import ToolParserManager
107
+ from vllm.entrypoints.tool_server import DemoToolServer, MCPToolServer, ToolServer
108
+ from vllm.entrypoints.utils import (
109
+ cli_env_setup,
110
+ load_aware_call,
111
+ log_non_default_args,
112
+ process_chat_template,
113
+ process_lora_modules,
114
+ with_cancellation,
115
+ )
116
+ from vllm.logger import init_logger
117
+ from vllm.reasoning import ReasoningParserManager
118
+ from vllm.tasks import POOLING_TASKS
119
+ from vllm.usage.usage_lib import UsageContext
120
+ from vllm.utils.argparse_utils import FlexibleArgumentParser
121
+ from vllm.utils.gc_utils import freeze_gc_heap
122
+ from vllm.utils.network_utils import is_valid_ipv6_address
123
+ from vllm.utils.system_utils import decorate_logs, set_ulimit
124
+ from vllm.v1.engine.exceptions import EngineDeadError
125
+ from vllm.v1.metrics.prometheus import get_prometheus_registry
126
+ from vllm.version import __version__ as VLLM_VERSION
127
+
128
# Handle to the temporary directory backing prometheus multiprocess metrics;
# declared at module scope so it lives for the whole process lifetime.
prometheus_multiproc_dir: tempfile.TemporaryDirectory

# Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765)
logger = init_logger("vllm.entrypoints.openai.api_server")

# Request header clients use to select the load-metrics response format.
ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = "endpoint-load-metrics-format"

# Strong references to background tasks so they are not garbage collected
# mid-flight (asyncio only keeps weak references to running tasks).
_running_tasks: set[asyncio.Task] = set()
137
+
138
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook.

    On startup, optionally spawns a periodic stat-logging task and freezes
    the GC heap; on shutdown, cancels the task and drops ``app.state`` so
    the engine client reference can be garbage collected.
    """
    try:
        if app.state.log_stats:
            engine_client: EngineClient = app.state.engine_client

            async def _force_log():
                # Periodically flush engine stats at the configured interval.
                while True:
                    await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL)
                    await engine_client.do_log_stats()

            task = asyncio.create_task(_force_log())
            # Hold a strong reference so the task is not garbage collected
            # before it finishes; remove it once done.
            _running_tasks.add(task)
            task.add_done_callback(_running_tasks.remove)
        else:
            task = None

        # Mark the startup heap as static so that it's ignored by GC.
        # Reduces pause times of oldest generation collections.
        freeze_gc_heap()
        try:
            yield
        finally:
            if task is not None:
                task.cancel()
    finally:
        # Ensure app state including engine ref is gc'd
        del app.state
166
+
167
+
168
@asynccontextmanager
async def build_async_engine_client(
    args: Namespace,
    *,
    usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
    disable_frontend_multiprocessing: bool | None = None,
    client_config: dict[str, Any] | None = None,
) -> AsyncIterator[EngineClient]:
    """Build an ``EngineClient`` from parsed CLI args.

    Thin wrapper around ``build_async_engine_client_from_engine_args`` that
    converts ``args`` into ``AsyncEngineArgs`` and, when requested via the
    environment, prepares a forkserver with heavy modules pre-imported.
    """
    if os.getenv("VLLM_WORKER_MULTIPROC_METHOD") == "forkserver":
        # The executor is expected to be mp.
        # Pre-import heavy modules in the forkserver process
        logger.debug("Setup forkserver with pre-imports")
        multiprocessing.set_start_method("forkserver")
        multiprocessing.set_forkserver_preload(["vllm.v1.engine.async_llm"])
        forkserver.ensure_running()
        logger.debug("Forkserver setup complete!")

    # Context manager to handle engine_client lifecycle
    # Ensures everything is shutdown and cleaned up on error/exit
    engine_args = AsyncEngineArgs.from_cli_args(args)
    if client_config:
        # Propagate API-server process topology into the engine args.
        engine_args._api_process_count = client_config.get("client_count", 1)
        engine_args._api_process_rank = client_config.get("client_index", 0)

    if disable_frontend_multiprocessing is None:
        # None means "take the value from the CLI flag".
        disable_frontend_multiprocessing = bool(args.disable_frontend_multiprocessing)

    async with build_async_engine_client_from_engine_args(
        engine_args,
        usage_context=usage_context,
        disable_frontend_multiprocessing=disable_frontend_multiprocessing,
        client_config=client_config,
    ) as engine:
        yield engine
202
+
203
+
204
@asynccontextmanager
async def build_async_engine_client_from_engine_args(
    engine_args: AsyncEngineArgs,
    *,
    usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
    disable_frontend_multiprocessing: bool = False,
    client_config: dict[str, Any] | None = None,
) -> AsyncIterator[EngineClient]:
    """
    Create EngineClient, either:
        - in-process using the AsyncLLMEngine Directly
        - multiprocess using AsyncLLMEngine RPC

    Returns the Client or None if the creation failed.
    """

    # Create the EngineConfig (determines if we can use V1).
    vllm_config = engine_args.create_engine_config(usage_context=usage_context)

    if disable_frontend_multiprocessing:
        # The flag is accepted but has no effect here beyond this warning.
        logger.warning("V1 is enabled, but got --disable-frontend-multiprocessing.")

    # Imported locally; this module is also what the forkserver path preloads.
    from vllm.v1.engine.async_llm import AsyncLLM

    async_llm: AsyncLLM | None = None

    # Don't mutate the input client_config
    client_config = dict(client_config) if client_config else {}
    client_count = client_config.pop("client_count", 1)
    client_index = client_config.pop("client_index", 0)

    try:
        async_llm = AsyncLLM.from_vllm_config(
            vllm_config=vllm_config,
            usage_context=usage_context,
            enable_log_requests=engine_args.enable_log_requests,
            aggregate_engine_logging=engine_args.aggregate_engine_logging,
            disable_log_stats=engine_args.disable_log_stats,
            client_addresses=client_config,
            client_count=client_count,
            client_index=client_index,
        )

        # Don't keep the dummy data in memory
        assert async_llm is not None
        await async_llm.reset_mm_cache()

        yield async_llm
    finally:
        # Always shut the engine down, even if the caller's body raised.
        if async_llm:
            async_llm.shutdown()
255
+
256
+
257
async def validate_json_request(raw_request: Request):
    """Reject requests whose Content-Type is not ``application/json``."""
    raw_content_type = raw_request.headers.get("content-type", "").lower()
    # Strip any parameters (e.g. "; charset=utf-8") from the media type.
    mime_type, _, _ = raw_content_type.partition(";")
    if mime_type != "application/json":
        raise RequestValidationError(
            errors=["Unsupported Media Type: Only 'application/json' is allowed"]
        )
264
+
265
+
266
# Router collecting every HTTP endpoint defined in this module.
router = APIRouter()
267
+
268
+
269
class PrometheusResponse(Response):
    """Plain-text response advertising the Prometheus exposition content type."""

    media_type = prometheus_client.CONTENT_TYPE_LATEST
271
+
272
+
273
def mount_metrics(app: FastAPI):
    """Mount prometheus metrics to a FastAPI app."""

    registry = get_prometheus_registry()

    # `response_class=PrometheusResponse` is needed to return an HTTP response
    # with header "Content-Type: text/plain; version=0.0.4; charset=utf-8"
    # instead of the default "application/json" which is incorrect.
    # See https://github.com/trallnag/prometheus-fastapi-instrumentator/issues/163#issue-1296092364
    Instrumentator(
        excluded_handlers=[
            "/metrics",
            "/health",
            "/load",
            "/ping",
            "/version",
            "/server_info",
        ],
        registry=registry,
    ).add().instrument(app).expose(app, response_class=PrometheusResponse)

    # Add prometheus asgi middleware to route /metrics requests
    metrics_route = Mount("/metrics", make_asgi_app(registry=registry))

    # Workaround for 307 Redirect for /metrics
    # (widen the path regex so "/metrics" matches without a trailing slash).
    metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
    app.routes.append(metrics_route)
300
+
301
+
302
def base(request: Request) -> OpenAIServing:
    """Return a generic serving handler, reusing the tokenization instance."""
    return tokenization(request)
305
+
306
+
307
def models(request: Request) -> OpenAIServingModels:
    """Fetch the models handler stored on the application state."""
    app_state = request.app.state
    return app_state.openai_serving_models
309
+
310
+
311
def responses(request: Request) -> OpenAIServingResponses | None:
    """Fetch the Responses API handler from application state, if configured."""
    app_state = request.app.state
    return app_state.openai_serving_responses
313
+
314
+
315
def messages(request: Request) -> AnthropicServingMessages:
    """Fetch the Anthropic Messages handler from application state."""
    app_state = request.app.state
    return app_state.anthropic_serving_messages
317
+
318
+
319
def chat(request: Request) -> OpenAIServingChat | None:
    """Fetch the chat-completions handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_chat
321
+
322
+
323
def completion(request: Request) -> OpenAIServingCompletion | None:
    """Fetch the completions handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_completion
325
+
326
+
327
def pooling(request: Request) -> OpenAIServingPooling | None:
    """Fetch the pooling handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_pooling
329
+
330
+
331
def embedding(request: Request) -> OpenAIServingEmbedding | None:
    """Fetch the embeddings handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_embedding
333
+
334
+
335
def score(request: Request) -> ServingScores | None:
    """Fetch the scores handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_scores
337
+
338
+
339
def classify(request: Request) -> ServingClassification | None:
    """Fetch the classification handler from application state, if any."""
    app_state = request.app.state
    return app_state.openai_serving_classification
341
+
342
+
343
def rerank(request: Request) -> ServingScores | None:
    """Fetch the rerank handler (shared with /score) from application state."""
    app_state = request.app.state
    return app_state.openai_serving_scores
345
+
346
+
347
def tokenization(request: Request) -> OpenAIServingTokenization:
    """Fetch the tokenization handler from application state."""
    app_state = request.app.state
    return app_state.openai_serving_tokenization
349
+
350
+
351
def transcription(request: Request) -> OpenAIServingTranscription:
    """Fetch the audio transcription handler from application state."""
    app_state = request.app.state
    return app_state.openai_serving_transcription
353
+
354
+
355
def translation(request: Request) -> OpenAIServingTranslation:
    """Fetch the audio translation handler from application state."""
    app_state = request.app.state
    return app_state.openai_serving_translation
357
+
358
+
359
def engine_client(request: Request) -> EngineClient:
    """Fetch the engine client from application state."""
    app_state = request.app.state
    return app_state.engine_client
361
+
362
+
363
def generate_tokens(request: Request) -> ServingTokens | None:
    """Fetch the token-generation handler from application state, if any."""
    app_state = request.app.state
    return app_state.serving_tokens
365
+
366
+
367
@router.get("/health", response_class=Response)
async def health(raw_request: Request) -> Response:
    """Health check: 200 when the engine responds, 503 when it is dead."""
    client = engine_client(raw_request)
    try:
        await client.check_health()
    except EngineDeadError:
        return Response(status_code=503)
    return Response(status_code=200)
375
+
376
+
377
@router.get("/load")
async def get_server_load_metrics(request: Request):
    """Report the current number of in-flight GPU-bound requests.

    The counter tracks requests made through the generation and pooling
    routes: chat/text completions, audio transcriptions/translations,
    embeddings, /pooling, /classify, /score (v1 included) and the
    rerank endpoints (/rerank, /v1/rerank, /v2/rerank).
    """
    return JSONResponse(content={"server_load": request.app.state.server_load_metrics})
394
+
395
+
396
@router.post(
    "/tokenize",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
        HTTPStatus.NOT_IMPLEMENTED.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def tokenize(request: TokenizeRequest, raw_request: Request):
    """Tokenize text using the served model's tokenizer."""
    handler = tokenization(raw_request)

    try:
        result = await handler.create_tokenize(request, raw_request)
    except NotImplementedError as e:
        # The handler may not support this tokenization mode.
        raise HTTPException(
            status_code=HTTPStatus.NOT_IMPLEMENTED.value, detail=str(e)
        ) from e
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, TokenizeResponse):
        return JSONResponse(content=result.model_dump())
    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)

    assert_never(result)
429
+
430
+
431
@router.post(
    "/detokenize",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def detokenize(request: DetokenizeRequest, raw_request: Request):
    """Convert token ids back into text using the served model's tokenizer."""
    handler = tokenization(raw_request)

    try:
        result = await handler.create_detokenize(request, raw_request)
    except OverflowError as e:
        # Out-of-range token ids surface as a request-validation failure.
        raise RequestValidationError(errors=[str(e)]) from e
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, DetokenizeResponse):
        return JSONResponse(content=result.model_dump())
    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)

    assert_never(result)
461
+
462
+
463
def maybe_register_tokenizer_info_endpoint(args):
    """Conditionally register the tokenizer info endpoint if enabled."""
    if getattr(args, "enable_tokenizer_info_endpoint", False):

        @router.get("/tokenizer_info")
        async def get_tokenizer_info(raw_request: Request):
            """Get comprehensive tokenizer information."""
            result = await tokenization(raw_request).get_tokenizer_info()
            # Error responses carry their own status code; success is 200.
            return JSONResponse(
                content=result.model_dump(),
                status_code=result.error.code
                if isinstance(result, ErrorResponse)
                else 200,
            )
477
+
478
+
479
@router.get("/v1/models")
async def show_available_models(raw_request: Request):
    """List the models served by this endpoint in OpenAI list format."""
    model_listing = await models(raw_request).show_available_models()
    return JSONResponse(content=model_listing.model_dump())
485
+
486
+
487
@router.get("/version")
async def show_version():
    """Report the running vLLM version."""
    return JSONResponse(content={"version": VLLM_VERSION})
491
+
492
+
493
+ async def _convert_stream_to_sse_events(
494
+ generator: AsyncGenerator[StreamingResponsesResponse, None],
495
+ ) -> AsyncGenerator[str, None]:
496
+ """Convert the generator to a stream of events in SSE format"""
497
+ async for event in generator:
498
+ event_type = getattr(event, "type", "unknown")
499
+ # https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format
500
+ event_data = (
501
+ f"event: {event_type}\ndata: {event.model_dump_json(indent=None)}\n\n"
502
+ )
503
+ yield event_data
504
+
505
+
506
@router.post(
    "/v1/responses",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def create_responses(request: ResponsesRequest, raw_request: Request):
    """OpenAI-compatible Responses API endpoint (streaming or blocking)."""
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API"
        )
    try:
        result = await handler.create_responses(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, ResponsesResponse):
        return JSONResponse(content=result.model_dump())

    # Anything else is an async event stream to forward as SSE.
    return StreamingResponse(
        content=_convert_stream_to_sse_events(result), media_type="text/event-stream"
    )
540
+
541
+
542
@router.get("/v1/responses/{response_id}")
async def retrieve_responses(
    response_id: str,
    raw_request: Request,
    starting_after: int | None = None,
    stream: bool | None = False,
):
    """Fetch a previously created response, optionally replaying it as SSE."""
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API"
        )

    try:
        result = await handler.retrieve_responses(
            response_id,
            starting_after=starting_after,
            stream=stream,
        )
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, ResponsesResponse):
        return JSONResponse(content=result.model_dump())
    return StreamingResponse(
        content=_convert_stream_to_sse_events(result), media_type="text/event-stream"
    )
575
+
576
+
577
@router.post("/v1/responses/{response_id}/cancel")
async def cancel_responses(response_id: str, raw_request: Request):
    """Cancel an in-flight response by id."""
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API"
        )

    try:
        result = await handler.cancel_responses(response_id)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    return JSONResponse(content=result.model_dump())
597
+
598
+
599
@router.post(
    "/v1/messages",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": AnthropicErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": AnthropicErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": AnthropicErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_messages(request: AnthropicMessagesRequest, raw_request: Request):
    """Anthropic-compatible Messages API endpoint (streaming or blocking)."""

    def _to_anthropic_error(err: ErrorResponse) -> JSONResponse:
        # Re-shape an OpenAI-style error payload into Anthropic's format.
        body = AnthropicErrorResponse(
            error=AnthropicError(
                type=err.error.type,
                message=err.error.message,
            )
        )
        return JSONResponse(status_code=err.error.code, content=body.model_dump())

    handler = messages(raw_request)
    if handler is None:
        return _to_anthropic_error(
            base(raw_request).create_error_response(
                message="The model does not support Messages API"
            )
        )

    try:
        result = await handler.create_messages(request, raw_request)
    except Exception as e:
        logger.exception("Error in create_messages: %s", e)
        return JSONResponse(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
            content=AnthropicErrorResponse(
                error=AnthropicError(
                    type="internal_error",
                    message=str(e),
                )
            ).model_dump(),
        )

    if isinstance(result, ErrorResponse):
        return _to_anthropic_error(result)

    if isinstance(result, AnthropicMessagesResponse):
        payload = result.model_dump(exclude_none=True)
        logger.debug("Anthropic Messages Response: %s", payload)
        return JSONResponse(content=payload)

    # Streaming: the handler already yields Anthropic-format SSE chunks.
    return StreamingResponse(content=result, media_type="text/event-stream")
653
+
654
+
655
@router.post(
    "/v1/chat/completions",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request):
    """OpenAI-compatible chat completions endpoint (streaming or blocking)."""
    load_metrics_format = raw_request.headers.get(
        ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
    )
    handler = chat(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Chat Completions API"
        )
    try:
        result = await handler.create_chat_completion(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, ChatCompletionResponse):
        # Blocking responses carry load metrics in the response headers.
        return JSONResponse(
            content=result.model_dump(),
            headers=metrics_header(load_metrics_format),
        )

    return StreamingResponse(content=result, media_type="text/event-stream")
694
+
695
+
696
@router.post(
    "/v1/completions",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_completion(request: CompletionRequest, raw_request: Request):
    """OpenAI-compatible (legacy) text completions endpoint."""
    load_metrics_format = raw_request.headers.get(
        ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
    )
    handler = completion(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Completions API"
        )

    try:
        result = await handler.create_completion(request, raw_request)
    except OverflowError as e:
        # Overflowing numeric parameters are a client error, not a 500.
        raise HTTPException(
            status_code=HTTPStatus.BAD_REQUEST.value, detail=str(e)
        ) from e
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, CompletionResponse):
        return JSONResponse(
            content=result.model_dump(),
            headers=metrics_header(load_metrics_format),
        )

    return StreamingResponse(content=result, media_type="text/event-stream")
740
+
741
+
742
@router.post(
    "/v1/embeddings",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_embedding(
    request: EmbeddingRequest,
    raw_request: Request,
):
    """OpenAI-compatible embeddings endpoint (JSON or raw-bytes output)."""
    handler = embedding(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Embeddings API"
        )

    try:
        result = await handler.create_embedding(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, EmbeddingResponse):
        return JSONResponse(content=result.model_dump())
    if isinstance(result, EmbeddingBytesResponse):
        # Binary embeddings stream the body with metadata in a header.
        return StreamingResponse(
            content=result.body,
            headers={"metadata": result.metadata},
            media_type=result.media_type,
        )

    assert_never(result)
783
+
784
+
785
@router.post(
    "/pooling",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_pooling(request: PoolingRequest, raw_request: Request):
    """Pooling endpoint exposing raw pooled model outputs (JSON or bytes)."""
    handler = pooling(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Pooling API"
        )
    try:
        result = await handler.create_pooling(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, (PoolingResponse, IOProcessorResponse)):
        return JSONResponse(content=result.model_dump())
    if isinstance(result, PoolingBytesResponse):
        # Binary pooling output streams the body with metadata in a header.
        return StreamingResponse(
            content=result.body,
            headers={"metadata": result.metadata},
            media_type=result.media_type,
        )

    assert_never(result)
821
+
822
+
823
@router.post("/classify", dependencies=[Depends(validate_json_request)])
@with_cancellation
@load_aware_call
async def create_classify(request: ClassificationRequest, raw_request: Request):
    """Classification endpoint for sequence-classification models."""
    handler = classify(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Classification API"
        )

    try:
        result = await handler.create_classify(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, ClassificationResponse):
        return JSONResponse(content=result.model_dump())

    assert_never(result)
848
+
849
+
850
@router.post(
    "/score",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_score(request: ScoreRequest, raw_request: Request):
    """Score endpoint: relevance/similarity scores for input pairs."""
    handler = score(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Score API"
        )

    try:
        result = await handler.create_score(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, ScoreResponse):
        return JSONResponse(content=result.model_dump())

    assert_never(result)
881
+
882
+
883
@router.post(
    "/v1/score",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_score_v1(request: ScoreRequest, raw_request: Request):
    """Deprecated alias for ``/score``: warn, then delegate."""
    logger.warning(
        "To indicate that Score API is not part of standard OpenAI API, we "
        "have moved it to `/score`. Please update your client accordingly."
    )

    return await create_score(request, raw_request)
900
+
901
+
902
@router.post(
    "/v1/audio/transcriptions",
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_transcriptions(
    raw_request: Request, request: Annotated[TranscriptionRequest, Form()]
):
    """OpenAI-compatible audio transcription endpoint (multipart form input)."""
    handler = transcription(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Transcriptions API"
        )

    # Read the uploaded audio fully into memory before handing it off.
    audio_data = await request.file.read()
    try:
        generator = await handler.create_transcription(audio_data, request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )

    elif isinstance(generator, TranscriptionResponse):
        return JSONResponse(content=generator.model_dump())

    # Otherwise the handler returned a streaming transcription (SSE chunks).
    return StreamingResponse(content=generator, media_type="text/event-stream")
939
+
940
+
941
@router.post(
    "/v1/audio/translations",
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_translations(
    request: Annotated[TranslationRequest, Form()], raw_request: Request
):
    """OpenAI-compatible audio translation endpoint (multipart form upload)."""
    handler = translation(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Translations API"
        )

    # Read the uploaded audio payload from the multipart form.
    audio_data = await request.file.read()
    try:
        result = await handler.create_translation(audio_data, request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, TranslationResponse):
        return JSONResponse(content=result.model_dump())

    # Anything else is an async generator of SSE events (streaming mode).
    return StreamingResponse(content=result, media_type="text/event-stream")
978
+
979
+
980
@router.post(
    "/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def do_rerank(request: RerankRequest, raw_request: Request):
    """Handle the JinaAI-style rerank endpoint (`/rerank`)."""
    handler = rerank(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Rerank (Score) API"
        )

    try:
        result = await handler.do_rerank(request, raw_request)
    except Exception as e:
        # Surface unexpected handler failures as a 500 with the message.
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, RerankResponse):
        return JSONResponse(content=result.model_dump())
    if isinstance(result, ErrorResponse):
        return JSONResponse(
            content=result.model_dump(), status_code=result.error.code
        )

    assert_never(result)
1010
+
1011
+
1012
@router.post(
    "/v1/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def do_rerank_v1(request: RerankRequest, raw_request: Request):
    """Deprecated alias for `/rerank`; warns once and delegates to do_rerank."""
    logger.warning_once(
        "To indicate that the rerank API is not part of the standard OpenAI"
        " API, we have located it at `/rerank`. Please update your client "
        "accordingly. (Note: Conforms to JinaAI rerank API)"
    )

    return await do_rerank(request, raw_request)
1029
+
1030
+
1031
@router.post(
    "/v2/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def do_rerank_v2(request: RerankRequest, raw_request: Request):
    """Versioned alias for `/rerank`; delegates directly to do_rerank."""
    return await do_rerank(request, raw_request)
1042
+
1043
+
1044
# Development-only endpoints: cache resets, sleep/wake, server introspection,
# and collective RPC. Guarded by VLLM_SERVER_DEV_MODE because they expose
# engine internals and mutate engine state.
if envs.VLLM_SERVER_DEV_MODE:
    logger.warning(
        "SECURITY WARNING: Development endpoints are enabled! "
        "This should NOT be used in production!"
    )

    # TypeAdapter built once (construction is expensive) so /server_info can
    # serialize VllmConfig to JSON.
    PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig)

    @router.get("/server_info")
    async def show_server_info(
        raw_request: Request,
        config_format: Annotated[Literal["text", "json"], Query()] = "text",
    ):
        # Return the engine's VllmConfig, either stringified or as JSON.
        vllm_config: VllmConfig = raw_request.app.state.vllm_config
        server_info = {
            "vllm_config": str(vllm_config)
            if config_format == "text"
            else PydanticVllmConfig.dump_python(vllm_config, mode="json", fallback=str)
            # fallback=str is needed to handle e.g. torch.dtype
        }
        return JSONResponse(content=server_info)

    @router.post("/reset_prefix_cache")
    async def reset_prefix_cache(raw_request: Request):
        """
        Reset the prefix cache. Note that we currently do not check if the
        prefix cache is successfully reset in the API server.
        """
        logger.info("Resetting prefix cache...")
        await engine_client(raw_request).reset_prefix_cache()
        return Response(status_code=200)

    @router.post("/reset_mm_cache")
    async def reset_mm_cache(raw_request: Request):
        """
        Reset the multi-modal cache. Note that we currently do not check if the
        multi-modal cache is successfully reset in the API server.
        """
        logger.info("Resetting multi-modal cache...")
        await engine_client(raw_request).reset_mm_cache()
        return Response(status_code=200)

    @router.post("/sleep")
    async def sleep(raw_request: Request):
        """Put the engine to sleep at the level given by the `level` query param."""
        # get POST params
        level = raw_request.query_params.get("level", "1")
        await engine_client(raw_request).sleep(int(level))
        # FIXME: in v0 with frontend multiprocessing, the sleep command
        # is sent but does not finish yet when we return a response.
        return Response(status_code=200)

    @router.post("/wake_up")
    async def wake_up(raw_request: Request):
        """Wake the engine; repeated `tags` query params select what to restore."""
        tags = raw_request.query_params.getlist("tags")
        if tags == []:
            # set to None to wake up all tags if no tags are provided
            tags = None
        logger.info("wake up the engine with tags: %s", tags)
        await engine_client(raw_request).wake_up(tags)
        # FIXME: in v0 with frontend multiprocessing, the wake-up command
        # is sent but does not finish yet when we return a response.
        return Response(status_code=200)

    @router.get("/is_sleeping")
    async def is_sleeping(raw_request: Request):
        """Report whether the engine is currently sleeping."""
        logger.info("check whether the engine is sleeping")
        is_sleeping = await engine_client(raw_request).is_sleeping()
        return JSONResponse(content={"is_sleeping": is_sleeping})

    @router.post("/collective_rpc")
    async def collective_rpc(raw_request: Request):
        """Invoke a named method on the engine workers via collective RPC."""
        try:
            body = await raw_request.json()
        except json.JSONDecodeError as e:
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST.value,
                detail=f"JSON decode error: {e}",
            ) from e
        method = body.get("method")
        if method is None:
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST.value,
                detail="Missing 'method' in request body",
            )
        # For security reason, only serialized string args/kwargs are passed.
        # User-defined `method` is responsible for deserialization if needed.
        args: list[str] = body.get("args", [])
        kwargs: dict[str, str] = body.get("kwargs", {})
        timeout: float | None = body.get("timeout")
        results = await engine_client(raw_request).collective_rpc(
            method=method, timeout=timeout, args=tuple(args), kwargs=kwargs
        )
        if results is None:
            return Response(status_code=200)
        # JSON-native results pass through; everything else is stringified.
        response: list[Any] = []
        for result in results:
            if result is None or isinstance(result, (dict, list)):
                response.append(result)
            else:
                response.append(str(result))
        return JSONResponse(content={"results": response})
1145
+
1146
+
1147
@router.post(
    "/scale_elastic_ep",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"model": dict},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.REQUEST_TIMEOUT.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
async def scale_elastic_ep(raw_request: Request):
    """Scale the engine to a new data-parallel size.

    Body: `new_data_parallel_size` (required, positive int) and
    `drain_timeout` (optional, seconds, default 120). Sets the global
    scaling flag so ScalingMiddleware rejects new traffic while in-flight
    requests drain; returns 408 if draining times out.
    """
    try:
        body = await raw_request.json()
    except json.JSONDecodeError as e:
        raise HTTPException(status_code=400, detail="Invalid JSON format") from e  # noqa: B904

    new_data_parallel_size = body.get("new_data_parallel_size")
    drain_timeout = body.get("drain_timeout", 120)  # Default 2 minutes

    if new_data_parallel_size is None:
        raise HTTPException(
            status_code=400, detail="new_data_parallel_size is required"
        )

    if not isinstance(new_data_parallel_size, int) or new_data_parallel_size <= 0:
        raise HTTPException(
            status_code=400, detail="new_data_parallel_size must be a positive integer"
        )

    if not isinstance(drain_timeout, int) or drain_timeout <= 0:
        raise HTTPException(
            status_code=400, detail="drain_timeout must be a positive integer"
        )

    # Set scaling flag to prevent new requests
    global _scaling_elastic_ep
    _scaling_elastic_ep = True
    client = engine_client(raw_request)
    try:
        await client.scale_elastic_ep(new_data_parallel_size, drain_timeout)
        return JSONResponse(
            {
                "message": f"Scaled to {new_data_parallel_size} data parallel engines",
            }
        )
    except TimeoutError as e:
        raise HTTPException(
            status_code=408,
            detail="Scale failed due to request drain timeout "
            f"after {drain_timeout} seconds",
        ) from e
    except Exception as e:
        logger.error("Scale failed: %s", e)
        raise HTTPException(status_code=500, detail="Scale failed") from e
    finally:
        # Always clear the flag so ScalingMiddleware stops returning 503.
        _scaling_elastic_ep = False
1203
+
1204
+
1205
@router.post("/is_scaling_elastic_ep")
async def is_scaling_elastic_ep(raw_request: Request):
    """Report whether an elastic-EP scale operation is currently in progress."""
    return JSONResponse({"is_scaling_elastic_ep": _scaling_elastic_ep})
1208
+
1209
+
1210
# Dispatch tables mapping each request model to (handler getter, endpoint
# coroutine) for the generic invocations route.
# TODO: RequestType = TypeForm[BaseModel] when recognized by type checkers
# (requires typing_extensions >= 4.13)
RequestType = Any
GetHandlerFn = Callable[[Request], OpenAIServing | None]
EndpointFn = Callable[[RequestType, Request], Awaitable[Any]]

# NOTE: Items defined earlier take higher priority
INVOCATION_TYPES: list[tuple[RequestType, tuple[GetHandlerFn, EndpointFn]]] = [
    (ChatCompletionRequest, (chat, create_chat_completion)),
    (CompletionRequest, (completion, create_completion)),
    (EmbeddingRequest, (embedding, create_embedding)),
    (ClassificationRequest, (classify, create_classify)),
    (ScoreRequest, (score, create_score)),
    (RerankRequest, (rerank, do_rerank)),
    (PoolingRequest, (pooling, create_pooling)),
]

# NOTE: Construct the TypeAdapters only once
INVOCATION_VALIDATORS = [
    (pydantic.TypeAdapter(request_type), (get_handler, endpoint))
    for request_type, (get_handler, endpoint) in INVOCATION_TYPES
]
1232
+
1233
+
1234
@router.post(
    "/inference/v1/generate",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def generate(request: GenerateRequest, raw_request: Request):
    """Token-level generation endpoint (`/inference/v1/generate`)."""
    handler = generate_tokens(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support generate tokens API"
        )

    try:
        result = await handler.serve_tokens(request, raw_request)
    except Exception as e:
        # Surface unexpected handler failures as a 500 with the message.
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)
    if isinstance(result, GenerateResponse):
        return JSONResponse(content=result.model_dump())

    # Anything else is an async generator of SSE events (streaming mode).
    return StreamingResponse(content=result, media_type="text/event-stream")
1267
+
1268
+
1269
# Profiler control endpoints, registered only when either the torch profiler
# or the CUDA profiler is enabled via environment variables.
if envs.VLLM_TORCH_PROFILER_DIR:
    logger.warning_once(
        "Torch Profiler is enabled in the API server. This should ONLY be "
        "used for local development!"
    )
elif envs.VLLM_TORCH_CUDA_PROFILE:
    logger.warning_once(
        "CUDA Profiler is enabled in the API server. This should ONLY be "
        "used for local development!"
    )
if envs.VLLM_TORCH_PROFILER_DIR or envs.VLLM_TORCH_CUDA_PROFILE:

    @router.post("/start_profile")
    async def start_profile(raw_request: Request):
        """Start profiling on the engine."""
        logger.info("Starting profiler...")
        await engine_client(raw_request).start_profile()
        logger.info("Profiler started.")
        return Response(status_code=200)

    @router.post("/stop_profile")
    async def stop_profile(raw_request: Request):
        """Stop profiling on the engine."""
        logger.info("Stopping profiler...")
        await engine_client(raw_request).stop_profile()
        logger.info("Profiler stopped.")
        return Response(status_code=200)
1294
+
1295
+
1296
+ def load_log_config(log_config_file: str | None) -> dict | None:
1297
+ if not log_config_file:
1298
+ return None
1299
+ try:
1300
+ with open(log_config_file) as f:
1301
+ return json.load(f)
1302
+ except Exception as e:
1303
+ logger.warning(
1304
+ "Failed to load log config from file %s: error %s", log_config_file, e
1305
+ )
1306
+ return None
1307
+
1308
+
1309
class AuthenticationMiddleware:
    """
    Pure ASGI middleware that authenticates each request by checking
    if the Authorization Bearer token exists and equals any of "{api_key}".

    Notes
    -----
    There are two cases in which authentication is skipped:
    1. The HTTP method is OPTIONS.
    2. The request path doesn't start with /v1 (e.g. /health).
    """

    def __init__(self, app: ASGIApp, tokens: list[str]) -> None:
        self.app = app
        # Store SHA-256 digests rather than raw tokens so comparisons can use
        # constant-time digest equality below.
        self.api_tokens = [hashlib.sha256(t.encode("utf-8")).digest() for t in tokens]

    def verify_token(self, headers: Headers) -> bool:
        """Return True iff the Authorization header carries a valid Bearer token."""
        authorization_header_value = headers.get("Authorization")
        if not authorization_header_value:
            return False

        scheme, _, param = authorization_header_value.partition(" ")
        if scheme.lower() != "bearer":
            return False

        param_hash = hashlib.sha256(param.encode("utf-8")).digest()

        # Compare against every configured token without early exit so timing
        # does not reveal which (if any) token matched.
        token_match = False
        for token_hash in self.api_tokens:
            token_match |= secrets.compare_digest(param_hash, token_hash)

        return token_match

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        # scope["type"] can be "lifespan" or "startup" for example,
        # in which case we don't need to do anything.
        # BUGFIX: use scope.get("method") -- ASGI websocket scopes carry no
        # "method" key, so scope["method"] raised KeyError for websockets.
        if scope["type"] not in ("http", "websocket") or scope.get("method") == "OPTIONS":
            return self.app(scope, receive, send)
        root_path = scope.get("root_path", "")
        url_path = URL(scope=scope).path.removeprefix(root_path)
        headers = Headers(scope=scope)
        # Only OpenAI-compatible (/v1) routes require authentication.
        if url_path.startswith("/v1") and not self.verify_token(headers):
            response = JSONResponse(content={"error": "Unauthorized"}, status_code=401)
            return response(scope, receive, send)
        return self.app(scope, receive, send)
1355
+
1356
+
1357
class XRequestIdMiddleware:
    """
    Middleware that sets the X-Request-Id response header to a random
    uuid4 (hex) value when the header is absent from the request, and
    echoes the caller-provided request id otherwise.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        if scope["type"] not in ("http", "websocket"):
            return self.app(scope, receive, send)

        # Capture the incoming request headers for later lookup.
        incoming_headers = Headers(scope=scope)

        async def send_with_request_id(message: Message) -> None:
            """
            Wrapped send that appends X-Request-Id to the response
            headers before forwarding the message.
            """
            if message["type"] == "http.response.start":
                response_headers = MutableHeaders(raw=message["headers"])
                request_id = incoming_headers.get("X-Request-Id", uuid.uuid4().hex)
                response_headers.append("X-Request-Id", request_id)
            await send(message)

        return self.app(scope, receive, send_with_request_id)
1386
+
1387
+
1388
# Global flag tracking scaling state: set True by scale_elastic_ep while a
# scale operation is in flight; ScalingMiddleware returns 503 while it is set.
_scaling_elastic_ep = False
1390
+
1391
+
1392
class ScalingMiddleware:
    """
    Middleware that rejects HTTP traffic with 503 Service Unavailable
    while the model is scaling (see `_scaling_elastic_ep`).

    Applies to every HTTP request; non-HTTP scopes pass through untouched.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        if scope["type"] != "http":
            return self.app(scope, receive, send)

        # Consult the global scaling flag; fast path when not scaling.
        global _scaling_elastic_ep
        if not _scaling_elastic_ep:
            return self.app(scope, receive, send)

        # Scaling in progress: short-circuit with 503 Service Unavailable.
        unavailable = JSONResponse(
            content={
                "error": "The model is currently scaling. Please try again later."
            },
            status_code=503,
        )
        return unavailable(scope, receive, send)
1421
+
1422
+
1423
def _extract_content_from_chunk(chunk_data: dict) -> str:
    """Extract generated text content from one streaming response chunk."""
    try:
        from vllm.entrypoints.openai.protocol import (
            ChatCompletionStreamResponse,
            CompletionStreamResponse,
        )

        # Prefer type-safe parsing via the pydantic stream-response models.
        object_kind = chunk_data.get("object")
        if object_kind == "chat.completion.chunk":
            parsed_chat = ChatCompletionStreamResponse.model_validate(chunk_data)
            if parsed_chat.choices and parsed_chat.choices[0].delta.content:
                return parsed_chat.choices[0].delta.content
        elif object_kind == "text_completion":
            parsed_completion = CompletionStreamResponse.model_validate(chunk_data)
            if parsed_completion.choices and parsed_completion.choices[0].text:
                return parsed_completion.choices[0].text
    except pydantic.ValidationError:
        # Fallback to manual parsing when model validation fails.
        choices = chunk_data.get("choices")
        if choices:
            first_choice = choices[0]
            if "delta" in first_choice and first_choice["delta"].get("content"):
                return first_choice["delta"]["content"]
            elif first_choice.get("text"):
                return first_choice["text"]
    return ""
1449
+
1450
+
1451
class SSEDecoder:
    """Incremental decoder for Server-Sent Events streaming responses."""

    def __init__(self):
        self.buffer = ""
        self.content_buffer = []

    def decode_chunk(self, chunk: bytes) -> list[dict]:
        """Decode one chunk of SSE bytes into a list of parsed events."""
        import json

        try:
            text = chunk.decode("utf-8")
        except UnicodeDecodeError:
            # Drop undecodable chunks rather than failing the stream.
            return []

        self.buffer += text
        parsed_events = []

        # Consume only complete lines; keep any trailing partial line buffered.
        while "\n" in self.buffer:
            line, self.buffer = self.buffer.split("\n", 1)
            line = line.rstrip("\r")  # tolerate CRLF line endings

            if not line.startswith("data: "):
                continue
            payload = line[6:].strip()
            if payload == "[DONE]":
                parsed_events.append({"type": "done"})
            elif payload:
                try:
                    parsed_events.append({"type": "data", "data": json.loads(payload)})
                except json.JSONDecodeError:
                    # Skip malformed JSON payloads.
                    continue

        return parsed_events

    def extract_content(self, event_data: dict) -> str:
        """Extract text content from a parsed event payload."""
        return _extract_content_from_chunk(event_data)

    def add_content(self, content: str) -> None:
        """Append non-empty content to the accumulated buffer."""
        if content:
            self.content_buffer.append(content)

    def get_complete_content(self) -> str:
        """Return all accumulated content joined into a single string."""
        return "".join(self.content_buffer)
1502
+
1503
+
1504
def _log_streaming_response(response, response_body: list) -> None:
    """Log a streaming response by re-assembling its SSE content.

    Wraps the buffered body in a generator that re-yields each chunk
    unchanged while feeding it to an SSEDecoder; when the [DONE] sentinel
    is seen, logs the complete (possibly truncated) generated content.
    """
    from starlette.concurrency import iterate_in_threadpool

    sse_decoder = SSEDecoder()
    chunk_count = 0

    def buffered_iterator():
        nonlocal chunk_count

        for chunk in response_body:
            chunk_count += 1
            yield chunk

            # Parse SSE events from chunk
            events = sse_decoder.decode_chunk(chunk)

            for event in events:
                if event["type"] == "data":
                    content = sse_decoder.extract_content(event["data"])
                    sse_decoder.add_content(content)
                elif event["type"] == "done":
                    # Log complete content when done
                    full_content = sse_decoder.get_complete_content()
                    if full_content:
                        # Truncate if too long.
                        # BUGFIX: the "...[truncated]" marker used to be a
                        # dangling string expression and was never appended;
                        # it is now concatenated onto the truncated content.
                        if len(full_content) > 2048:
                            full_content = full_content[:2048] + "...[truncated]"
                        logger.info(
                            "response_body={streaming_complete: content=%r, chunks=%d}",
                            full_content,
                            chunk_count,
                        )
                    else:
                        logger.info(
                            "response_body={streaming_complete: no_content, chunks=%d}",
                            chunk_count,
                        )
                    return

    response.body_iterator = iterate_in_threadpool(buffered_iterator())
    logger.info("response_body={streaming_started: chunks=%d}", len(response_body))
1547
+
1548
+
1549
def _log_non_streaming_response(response_body: list) -> None:
    """Log a non-streaming response body (first buffered section)."""
    try:
        logger.info("response_body={%s}", response_body[0].decode())
    except UnicodeDecodeError:
        # Binary payloads cannot be logged as text.
        logger.info("response_body={<binary_data>}")
1556
+
1557
+
1558
def build_app(args: Namespace) -> FastAPI:
    """Construct the FastAPI application: routes, middleware, exception handlers.

    Registration order matters: optional route groups are added to the router
    before it is included, and middleware is added innermost-first.
    """
    if args.disable_fastapi_docs:
        app = FastAPI(
            openapi_url=None, docs_url=None, redoc_url=None, lifespan=lifespan
        )
    else:
        app = FastAPI(lifespan=lifespan)

    if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
        logger.warning(
            "LoRA dynamic loading & unloading is enabled in the API server. "
            "This should ONLY be used for local development!"
        )
        from vllm.entrypoints.dynamic_lora import register_dynamic_lora_routes

        register_dynamic_lora_routes(router)

    from vllm.entrypoints.sagemaker.routes import register_sagemaker_routes

    register_sagemaker_routes(router)

    app.include_router(router)
    app.root_path = args.root_path

    mount_metrics(app)

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    @app.exception_handler(HTTPException)
    async def http_exception_handler(_: Request, exc: HTTPException):
        # Convert HTTPExceptions into the OpenAI-style error envelope.
        err = ErrorResponse(
            error=ErrorInfo(
                message=exc.detail,
                type=HTTPStatus(exc.status_code).phrase,
                code=exc.status_code,
            )
        )
        return JSONResponse(err.model_dump(), status_code=exc.status_code)

    @app.exception_handler(RequestValidationError)
    async def validation_exception_handler(_: Request, exc: RequestValidationError):
        exc_str = str(exc)
        errors_str = str(exc.errors())

        # Append the per-field error list only when it adds information.
        if exc.errors() and errors_str and errors_str != exc_str:
            message = f"{exc_str} {errors_str}"
        else:
            message = exc_str

        err = ErrorResponse(
            error=ErrorInfo(
                message=message,
                type=HTTPStatus.BAD_REQUEST.phrase,
                code=HTTPStatus.BAD_REQUEST,
            )
        )
        return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST)

    # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY
    if tokens := [key for key in (args.api_key or [envs.VLLM_API_KEY]) if key]:
        app.add_middleware(AuthenticationMiddleware, tokens=tokens)

    if args.enable_request_id_headers:
        app.add_middleware(XRequestIdMiddleware)

    # Add scaling middleware to check for scaling state
    app.add_middleware(ScalingMiddleware)

    if envs.VLLM_DEBUG_LOG_API_SERVER_RESPONSE:
        logger.warning(
            "CAUTION: Enabling log response in the API Server. "
            "This can include sensitive information and should be "
            "avoided in production."
        )

        @app.middleware("http")
        async def log_response(request: Request, call_next):
            response = await call_next(request)
            # Buffer the whole body so it can be both logged and re-served.
            response_body = [section async for section in response.body_iterator]
            response.body_iterator = iterate_in_threadpool(iter(response_body))
            # Check if this is a streaming response by looking at content-type
            content_type = response.headers.get("content-type", "")
            is_streaming = content_type == "text/event-stream; charset=utf-8"

            # Log response body based on type
            if not response_body:
                logger.info("response_body={<empty>}")
            elif is_streaming:
                _log_streaming_response(response, response_body)
            else:
                _log_non_streaming_response(response_body)
            return response

    # Each --middleware entry is a dotted path to a class or http coroutine.
    for middleware in args.middleware:
        module_path, object_name = middleware.rsplit(".", 1)
        imported = getattr(importlib.import_module(module_path), object_name)
        if inspect.isclass(imported):
            app.add_middleware(imported)  # type: ignore[arg-type]
        elif inspect.iscoroutinefunction(imported):
            app.middleware("http")(imported)
        else:
            raise ValueError(
                f"Invalid middleware {middleware}. Must be a function or a class."
            )

    app = sagemaker_standards.bootstrap(app)
    # Optional endpoints
    if args.tokens_only:

        @app.post("/abort_requests")
        async def abort_requests(raw_request: Request):
            """
            Abort one or more requests. To be used in a
            Disaggregated Everything setup.
            """
            try:
                body = await raw_request.json()
            except json.JSONDecodeError as e:
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST.value,
                    detail=f"JSON decode error: {e}",
                ) from e
            request_ids = body.get("request_ids")
            if request_ids is None:
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST.value,
                    detail="Missing 'request_ids' in request body",
                )
            # Abort requests in background
            asyncio.create_task(engine_client(raw_request).abort(request_ids))
            return Response(status_code=200)

    return app
1697
+
1698
+
1699
+ async def init_app_state(
1700
+ engine_client: EngineClient,
1701
+ state: State,
1702
+ args: Namespace,
1703
+ ) -> None:
1704
+ vllm_config = engine_client.vllm_config
1705
+
1706
+ if args.served_model_name is not None:
1707
+ served_model_names = args.served_model_name
1708
+ else:
1709
+ served_model_names = [args.model]
1710
+
1711
+ if args.enable_log_requests:
1712
+ request_logger = RequestLogger(max_log_len=args.max_log_len)
1713
+ else:
1714
+ request_logger = None
1715
+
1716
+ base_model_paths = [
1717
+ BaseModelPath(name=name, model_path=args.model) for name in served_model_names
1718
+ ]
1719
+
1720
+ state.engine_client = engine_client
1721
+ state.log_stats = not args.disable_log_stats
1722
+ state.vllm_config = vllm_config
1723
+
1724
+ supported_tasks = await engine_client.get_supported_tasks()
1725
+ logger.info("Supported tasks: %s", supported_tasks)
1726
+
1727
+ resolved_chat_template = await process_chat_template(
1728
+ args.chat_template, engine_client, vllm_config.model_config
1729
+ )
1730
+
1731
+ if args.tool_server == "demo":
1732
+ tool_server: ToolServer | None = DemoToolServer()
1733
+ assert isinstance(tool_server, DemoToolServer)
1734
+ await tool_server.init_and_validate()
1735
+ elif args.tool_server:
1736
+ tool_server = MCPToolServer()
1737
+ await tool_server.add_tool_server(args.tool_server)
1738
+ else:
1739
+ tool_server = None
1740
+
1741
+ # Merge default_mm_loras into the static lora_modules
1742
+ default_mm_loras = (
1743
+ vllm_config.lora_config.default_mm_loras
1744
+ if vllm_config.lora_config is not None
1745
+ else {}
1746
+ )
1747
+
1748
+ default_mm_loras = (
1749
+ vllm_config.lora_config.default_mm_loras
1750
+ if vllm_config.lora_config is not None
1751
+ else {}
1752
+ )
1753
+ lora_modules = process_lora_modules(args.lora_modules, default_mm_loras)
1754
+
1755
+ state.openai_serving_models = OpenAIServingModels(
1756
+ engine_client=engine_client,
1757
+ base_model_paths=base_model_paths,
1758
+ lora_modules=lora_modules,
1759
+ )
1760
+ await state.openai_serving_models.init_static_loras()
1761
+ state.openai_serving_responses = (
1762
+ OpenAIServingResponses(
1763
+ engine_client,
1764
+ state.openai_serving_models,
1765
+ request_logger=request_logger,
1766
+ chat_template=resolved_chat_template,
1767
+ chat_template_content_format=args.chat_template_content_format,
1768
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
1769
+ enable_auto_tools=args.enable_auto_tool_choice,
1770
+ tool_parser=args.tool_call_parser,
1771
+ tool_server=tool_server,
1772
+ reasoning_parser=args.structured_outputs_config.reasoning_parser,
1773
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
1774
+ enable_force_include_usage=args.enable_force_include_usage,
1775
+ enable_log_outputs=args.enable_log_outputs,
1776
+ log_error_stack=args.log_error_stack,
1777
+ )
1778
+ if "generate" in supported_tasks
1779
+ else None
1780
+ )
1781
+ state.openai_serving_chat = (
1782
+ OpenAIServingChat(
1783
+ engine_client,
1784
+ state.openai_serving_models,
1785
+ args.response_role,
1786
+ request_logger=request_logger,
1787
+ chat_template=resolved_chat_template,
1788
+ chat_template_content_format=args.chat_template_content_format,
1789
+ trust_request_chat_template=args.trust_request_chat_template,
1790
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
1791
+ enable_auto_tools=args.enable_auto_tool_choice,
1792
+ exclude_tools_when_tool_choice_none=args.exclude_tools_when_tool_choice_none,
1793
+ tool_parser=args.tool_call_parser,
1794
+ reasoning_parser=args.structured_outputs_config.reasoning_parser,
1795
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
1796
+ enable_force_include_usage=args.enable_force_include_usage,
1797
+ enable_log_outputs=args.enable_log_outputs,
1798
+ log_error_stack=args.log_error_stack,
1799
+ )
1800
+ if "generate" in supported_tasks
1801
+ else None
1802
+ )
1803
+ state.openai_serving_completion = (
1804
+ OpenAIServingCompletion(
1805
+ engine_client,
1806
+ state.openai_serving_models,
1807
+ request_logger=request_logger,
1808
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
1809
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
1810
+ enable_force_include_usage=args.enable_force_include_usage,
1811
+ log_error_stack=args.log_error_stack,
1812
+ )
1813
+ if "generate" in supported_tasks
1814
+ else None
1815
+ )
1816
+ state.openai_serving_pooling = (
1817
+ (
1818
+ OpenAIServingPooling(
1819
+ engine_client,
1820
+ state.openai_serving_models,
1821
+ supported_tasks=supported_tasks,
1822
+ request_logger=request_logger,
1823
+ chat_template=resolved_chat_template,
1824
+ chat_template_content_format=args.chat_template_content_format,
1825
+ trust_request_chat_template=args.trust_request_chat_template,
1826
+ log_error_stack=args.log_error_stack,
1827
+ )
1828
+ )
1829
+ if any(task in POOLING_TASKS for task in supported_tasks)
1830
+ else None
1831
+ )
1832
+ state.openai_serving_embedding = (
1833
+ OpenAIServingEmbedding(
1834
+ engine_client,
1835
+ state.openai_serving_models,
1836
+ request_logger=request_logger,
1837
+ chat_template=resolved_chat_template,
1838
+ chat_template_content_format=args.chat_template_content_format,
1839
+ trust_request_chat_template=args.trust_request_chat_template,
1840
+ log_error_stack=args.log_error_stack,
1841
+ )
1842
+ if "embed" in supported_tasks
1843
+ else None
1844
+ )
1845
+ state.openai_serving_classification = (
1846
+ ServingClassification(
1847
+ engine_client,
1848
+ state.openai_serving_models,
1849
+ request_logger=request_logger,
1850
+ chat_template=resolved_chat_template,
1851
+ chat_template_content_format=args.chat_template_content_format,
1852
+ trust_request_chat_template=args.trust_request_chat_template,
1853
+ log_error_stack=args.log_error_stack,
1854
+ )
1855
+ if "classify" in supported_tasks
1856
+ else None
1857
+ )
1858
+ state.openai_serving_scores = (
1859
+ ServingScores(
1860
+ engine_client,
1861
+ state.openai_serving_models,
1862
+ request_logger=request_logger,
1863
+ log_error_stack=args.log_error_stack,
1864
+ )
1865
+ if ("embed" in supported_tasks or "score" in supported_tasks)
1866
+ else None
1867
+ )
1868
+ state.openai_serving_tokenization = OpenAIServingTokenization(
1869
+ engine_client,
1870
+ state.openai_serving_models,
1871
+ request_logger=request_logger,
1872
+ chat_template=resolved_chat_template,
1873
+ chat_template_content_format=args.chat_template_content_format,
1874
+ trust_request_chat_template=args.trust_request_chat_template,
1875
+ log_error_stack=args.log_error_stack,
1876
+ )
1877
+ state.openai_serving_transcription = (
1878
+ OpenAIServingTranscription(
1879
+ engine_client,
1880
+ state.openai_serving_models,
1881
+ request_logger=request_logger,
1882
+ log_error_stack=args.log_error_stack,
1883
+ enable_force_include_usage=args.enable_force_include_usage,
1884
+ )
1885
+ if "transcription" in supported_tasks
1886
+ else None
1887
+ )
1888
+ state.openai_serving_translation = (
1889
+ OpenAIServingTranslation(
1890
+ engine_client,
1891
+ state.openai_serving_models,
1892
+ request_logger=request_logger,
1893
+ log_error_stack=args.log_error_stack,
1894
+ enable_force_include_usage=args.enable_force_include_usage,
1895
+ )
1896
+ if "transcription" in supported_tasks
1897
+ else None
1898
+ )
1899
+ state.anthropic_serving_messages = (
1900
+ AnthropicServingMessages(
1901
+ engine_client,
1902
+ state.openai_serving_models,
1903
+ args.response_role,
1904
+ request_logger=request_logger,
1905
+ chat_template=resolved_chat_template,
1906
+ chat_template_content_format=args.chat_template_content_format,
1907
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
1908
+ enable_auto_tools=args.enable_auto_tool_choice,
1909
+ tool_parser=args.tool_call_parser,
1910
+ reasoning_parser=args.structured_outputs_config.reasoning_parser,
1911
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
1912
+ enable_force_include_usage=args.enable_force_include_usage,
1913
+ )
1914
+ if "generate" in supported_tasks
1915
+ else None
1916
+ )
1917
+ state.serving_tokens = (
1918
+ ServingTokens(
1919
+ engine_client,
1920
+ state.openai_serving_models,
1921
+ request_logger=request_logger,
1922
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
1923
+ log_error_stack=args.log_error_stack,
1924
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
1925
+ enable_log_outputs=args.enable_log_outputs,
1926
+ force_no_detokenize=args.tokens_only,
1927
+ )
1928
+ if "generate" in supported_tasks
1929
+ else None
1930
+ )
1931
+
1932
+ state.enable_server_load_tracking = args.enable_server_load_tracking
1933
+ state.server_load_metrics = 0
1934
+
1935
+
1936
def create_server_socket(addr: tuple[str, int]) -> socket.socket:
    """Create a TCP listening socket bound to *addr* (host, port).

    Selects the IPv6 family when the host part is an IPv6 literal,
    otherwise IPv4, and enables SO_REUSEADDR and SO_REUSEPORT before
    binding so restarts and multi-worker setups can rebind quickly.
    The socket is bound but not yet listening; the caller hands it to
    the HTTP server.
    """
    host, _ = addr
    family = socket.AF_INET6 if is_valid_ipv6_address(host) else socket.AF_INET

    server_sock = socket.socket(family=family, type=socket.SOCK_STREAM)
    for option in (socket.SO_REUSEADDR, socket.SO_REUSEPORT):
        server_sock.setsockopt(socket.SOL_SOCKET, option, 1)
    server_sock.bind(addr)

    return server_sock
1947
+
1948
+
1949
def create_server_unix_socket(path: str) -> socket.socket:
    """Create a streaming Unix-domain socket bound to *path*.

    Returns the bound (not yet listening) socket for the server to use.
    """
    server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server_sock.bind(path)
    return server_sock
1953
+
1954
+
1955
def validate_api_server_args(args):
    """Fail fast on unknown tool-call / reasoning parser names.

    Args:
        args: Parsed CLI namespace; reads ``enable_auto_tool_choice``,
            ``tool_call_parser`` and
            ``structured_outputs_config.reasoning_parser``.

    Raises:
        KeyError: if auto tool choice is enabled with an unregistered
            tool call parser, or if an unregistered reasoning parser is
            configured.
    """
    valid_tool_parses = ToolParserManager.list_registered()
    if args.enable_auto_tool_choice and args.tool_call_parser not in valid_tool_parses:
        # Fix: error message previously read "chose from" (typo).
        raise KeyError(
            f"invalid tool call parser: {args.tool_call_parser} "
            f"(choose from {{ {','.join(valid_tool_parses)} }})"
        )

    valid_reasoning_parsers = ReasoningParserManager.list_registered()
    # A falsy (unset) reasoning parser is allowed; only validate when given.
    if (
        reasoning_parser := args.structured_outputs_config.reasoning_parser
    ) and reasoning_parser not in valid_reasoning_parsers:
        raise KeyError(
            f"invalid reasoning parser: {reasoning_parser} "
            f"(choose from {{ {','.join(valid_reasoning_parsers)} }})"
        )
1971
+
1972
+
1973
def setup_server(args):
    """Validate API server args, set up signal handler, create socket
    ready to serve.

    Returns:
        A ``(listen_address, sock)`` tuple: a human-readable address
        string (``unix:...`` or ``http(s)://host:port``) and the bound
        server socket to pass to uvicorn.
    """

    logger.info("vLLM API server version %s", VLLM_VERSION)
    log_non_default_args(args)

    # NOTE(review): the `len(...) > 3` guard presumably rejects
    # empty/placeholder plugin paths — confirm intent upstream.
    if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
        ToolParserManager.import_tool_parser(args.tool_parser_plugin)

    if args.reasoning_parser_plugin and len(args.reasoning_parser_plugin) > 3:
        ReasoningParserManager.import_reasoning_parser(args.reasoning_parser_plugin)

    # Must run after plugin imports so plugin-registered parsers count
    # as valid.
    validate_api_server_args(args)

    # workaround to make sure that we bind the port before the engine is set up.
    # This avoids race conditions with ray.
    # see https://github.com/vllm-project/vllm/issues/8204
    if args.uds:
        sock = create_server_unix_socket(args.uds)
    else:
        # Empty host binds all interfaces.
        sock_addr = (args.host or "", args.port)
        sock = create_server_socket(sock_addr)

    # workaround to avoid footguns where uvicorn drops requests with too
    # many concurrent requests active
    set_ulimit()

    def signal_handler(*_) -> None:
        # Interrupt server on sigterm while initializing
        raise KeyboardInterrupt("terminated")

    signal.signal(signal.SIGTERM, signal_handler)

    # Build the display address from the same values used for binding.
    if args.uds:
        listen_address = f"unix:{args.uds}"
    else:
        addr, port = sock_addr
        is_ssl = args.ssl_keyfile and args.ssl_certfile
        # IPv6 literals need brackets in URLs; empty host means all
        # interfaces, shown as 0.0.0.0.
        host_part = f"[{addr}]" if is_valid_ipv6_address(addr) else addr or "0.0.0.0"
        listen_address = f"http{'s' if is_ssl else ''}://{host_part}:{port}"
    return listen_address, sock
2015
+
2016
+
2017
async def run_server(args, **uvicorn_kwargs) -> None:
    """Run a single-worker API server.

    Prepares log decoration and the listening socket, then delegates to
    :func:`run_server_worker`.
    """
    # Tag this process's stdout/stderr so its log lines are attributable.
    decorate_logs("APIServer")

    bound_address, server_sock = setup_server(args)
    await run_server_worker(bound_address, server_sock, args, **uvicorn_kwargs)
2025
+
2026
+
2027
async def run_server_worker(
    listen_address, sock, args, client_config=None, **uvicorn_kwargs
) -> None:
    """Run a single API server worker.

    Args:
        listen_address: Display string for the bound address (logging only).
        sock: Pre-bound server socket from :func:`setup_server`.
        args: Parsed CLI namespace.
        client_config: Optional per-worker engine client configuration.
        **uvicorn_kwargs: Extra keyword arguments forwarded to uvicorn.
    """

    # NOTE(review): plugin imports are repeated here (also done in
    # setup_server) — presumably because workers may start in fresh
    # processes; the `len(...) > 3` guard looks like an
    # empty/placeholder-path filter — confirm upstream.
    if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
        ToolParserManager.import_tool_parser(args.tool_parser_plugin)

    if args.reasoning_parser_plugin and len(args.reasoning_parser_plugin) > 3:
        ReasoningParserManager.import_reasoning_parser(args.reasoning_parser_plugin)

    # Load logging config for uvicorn if specified
    log_config = load_log_config(args.log_config_file)
    if log_config is not None:
        uvicorn_kwargs["log_config"] = log_config

    # Engine client lifetime brackets the whole serving loop: everything
    # inside this context can assume a live backend.
    async with build_async_engine_client(
        args,
        client_config=client_config,
    ) as engine_client:
        maybe_register_tokenizer_info_endpoint(args)
        app = build_app(args)

        # Populate app.state (serving handlers, models, etc.) before
        # the HTTP server starts accepting requests.
        await init_app_state(engine_client, app.state, args)

        logger.info(
            "Starting vLLM API server %d on %s",
            engine_client.vllm_config.parallel_config._api_process_rank,
            listen_address,
        )
        # serve_http returns an awaitable that completes on shutdown.
        shutdown_task = await serve_http(
            app,
            sock=sock,
            enable_ssl_refresh=args.enable_ssl_refresh,
            host=args.host,
            port=args.port,
            log_level=args.uvicorn_log_level,
            # NOTE: When the 'disable_uvicorn_access_log' value is True,
            # no access log will be output.
            access_log=not args.disable_uvicorn_access_log,
            timeout_keep_alive=envs.VLLM_HTTP_TIMEOUT_KEEP_ALIVE,
            ssl_keyfile=args.ssl_keyfile,
            ssl_certfile=args.ssl_certfile,
            ssl_ca_certs=args.ssl_ca_certs,
            ssl_cert_reqs=args.ssl_cert_reqs,
            h11_max_incomplete_event_size=args.h11_max_incomplete_event_size,
            h11_max_header_count=args.h11_max_header_count,
            **uvicorn_kwargs,
        )

    # NB: Await server shutdown only after the backend context is exited
    try:
        await shutdown_task
    finally:
        # Always release the listening socket, even if shutdown raised.
        sock.close()
2082
+
2083
+
2084
if __name__ == "__main__":
    # NOTE(simon): keep this entry sequence in sync with
    # vllm/entrypoints/cli/main.py for CLI entrypoints.
    cli_env_setup()

    arg_parser = make_arg_parser(
        FlexibleArgumentParser(
            description="vLLM OpenAI-Compatible RESTful API server."
        )
    )
    parsed_args = arg_parser.parse_args()
    validate_parsed_serve_args(parsed_args)

    uvloop.run(run_server(parsed_args))