vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
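For reference, the wheel filename above follows the standard PEP 427 naming scheme, `{distribution}-{version}-{python tag}-{abi tag}-{platform tag}.whl`. A minimal sketch of decomposing it, assuming the third-party `packaging` library is installed (`pip install packaging`):

```python
# Minimal sketch: parse the wheel filename per PEP 427 using the
# third-party `packaging` library.
from packaging.utils import parse_wheel_filename

name, version, build, tags = parse_wheel_filename(
    "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"
)
print(name)     # "vllm-cpu" (normalized distribution name)
print(version)  # 0.11.0.post2
for tag in tags:
    # cp312 / cp312 / manylinux_2_17_x86_64:
    # CPython 3.12 interpreter and ABI, glibc >= 2.17, x86-64 Linux
    print(tag.interpreter, tag.abi, tag.platform)
```

Note that every entry in the list below shows `-0` deleted lines, consistent with this diff being taken against an empty baseline (i.e., the first release of the package being diffed).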
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1953 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import asyncio
+ import gc
+ import hashlib
+ import importlib
+ import inspect
+ import json
+ import multiprocessing
+ import multiprocessing.forkserver as forkserver
+ import os
+ import secrets
+ import signal
+ import socket
+ import tempfile
+ import uuid
+ from argparse import Namespace
+ from collections.abc import AsyncGenerator, AsyncIterator, Awaitable
+ from contextlib import asynccontextmanager
+ from http import HTTPStatus
+ from typing import Annotated, Any, Callable, Literal, Optional
+
+ import prometheus_client
+ import pydantic
+ import regex as re
+ import uvloop
+ from fastapi import (APIRouter, Depends, FastAPI, Form, HTTPException, Query,
+                      Request)
+ from fastapi.exceptions import RequestValidationError
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import JSONResponse, Response, StreamingResponse
+ from prometheus_client import make_asgi_app
+ from prometheus_fastapi_instrumentator import Instrumentator
+ from starlette.concurrency import iterate_in_threadpool
+ from starlette.datastructures import URL, Headers, MutableHeaders, State
+ from starlette.routing import Mount
+ from starlette.types import ASGIApp, Message, Receive, Scope, Send
+ from typing_extensions import assert_never
+
+ import vllm.envs as envs
+ from vllm.config import VllmConfig
+ from vllm.engine.arg_utils import AsyncEngineArgs
+ from vllm.engine.protocol import EngineClient
+ from vllm.entrypoints.chat_utils import (load_chat_template,
+                                          resolve_hf_chat_template,
+                                          resolve_mistral_chat_template)
+ from vllm.entrypoints.launcher import serve_http
+ from vllm.entrypoints.logger import RequestLogger
+ from vllm.entrypoints.openai.cli_args import (make_arg_parser,
+                                               validate_parsed_serve_args)
+ # yapf conflicts with isort for this block
+ # yapf: disable
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
+                                               ChatCompletionResponse,
+                                               ClassificationRequest,
+                                               ClassificationResponse,
+                                               CompletionRequest,
+                                               CompletionResponse,
+                                               DetokenizeRequest,
+                                               DetokenizeResponse,
+                                               EmbeddingRequest,
+                                               EmbeddingResponse, ErrorInfo,
+                                               ErrorResponse,
+                                               IOProcessorResponse,
+                                               LoadLoRAAdapterRequest,
+                                               PoolingRequest, PoolingResponse,
+                                               RerankRequest, RerankResponse,
+                                               ResponsesRequest,
+                                               ResponsesResponse, ScoreRequest,
+                                               ScoreResponse,
+                                               StreamingResponsesResponse,
+                                               TokenizeRequest,
+                                               TokenizeResponse,
+                                               TranscriptionRequest,
+                                               TranscriptionResponse,
+                                               TranslationRequest,
+                                               TranslationResponse,
+                                               UnloadLoRAAdapterRequest)
+ # yapf: enable
+ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
+ from vllm.entrypoints.openai.serving_classification import (
+     ServingClassification)
+ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
+ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
+ from vllm.entrypoints.openai.serving_models import (BaseModelPath,
+                                                     LoRAModulePath,
+                                                     OpenAIServingModels)
+ from vllm.entrypoints.openai.serving_pooling import OpenAIServingPooling
+ from vllm.entrypoints.openai.serving_responses import OpenAIServingResponses
+ from vllm.entrypoints.openai.serving_score import ServingScores
+ from vllm.entrypoints.openai.serving_tokenization import (
+     OpenAIServingTokenization)
+ from vllm.entrypoints.openai.serving_transcription import (
+     OpenAIServingTranscription, OpenAIServingTranslation)
+ from vllm.entrypoints.openai.tool_parsers import ToolParserManager
+ from vllm.entrypoints.tool_server import (DemoToolServer, MCPToolServer,
+                                           ToolServer)
+ from vllm.entrypoints.utils import (cli_env_setup, load_aware_call,
+                                     log_non_default_args, with_cancellation)
+ from vllm.logger import init_logger
+ from vllm.reasoning import ReasoningParserManager
+ from vllm.transformers_utils.tokenizer import MistralTokenizer
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import (Device, FlexibleArgumentParser, decorate_logs,
+                         is_valid_ipv6_address, set_ulimit)
+ from vllm.v1.engine.exceptions import EngineDeadError
+ from vllm.v1.metrics.prometheus import get_prometheus_registry
+ from vllm.version import __version__ as VLLM_VERSION
+
+ prometheus_multiproc_dir: tempfile.TemporaryDirectory
+
+ # Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765)
+ logger = init_logger('vllm.entrypoints.openai.api_server')
+
+ _running_tasks: set[asyncio.Task] = set()
+
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     try:
+         if app.state.log_stats:
+             engine_client: EngineClient = app.state.engine_client
+
+             async def _force_log():
+                 while True:
+                     await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL)
+                     await engine_client.do_log_stats()
+
+             task = asyncio.create_task(_force_log())
+             _running_tasks.add(task)
+             task.add_done_callback(_running_tasks.remove)
+         else:
+             task = None
+
+         # Mark the startup heap as static so that it's ignored by GC.
+         # Reduces pause times of oldest generation collections.
+         gc.collect()
+         gc.freeze()
+         try:
+             yield
+         finally:
+             if task is not None:
+                 task.cancel()
+     finally:
+         # Ensure app state including engine ref is gc'd
+         del app.state
+
+
+ @asynccontextmanager
+ async def build_async_engine_client(
+         args: Namespace,
+         *,
+         usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
+         disable_frontend_multiprocessing: Optional[bool] = None,
+         client_config: Optional[dict[str, Any]] = None,
+ ) -> AsyncIterator[EngineClient]:
+
+     if os.getenv("VLLM_WORKER_MULTIPROC_METHOD") == "forkserver":
+         # The executor is expected to be mp.
+         # Pre-import heavy modules in the forkserver process
+         logger.debug("Setup forkserver with pre-imports")
+         multiprocessing.set_start_method('forkserver')
+         multiprocessing.set_forkserver_preload(["vllm.v1.engine.async_llm"])
+         forkserver.ensure_running()
+         logger.debug("Forkserver setup complete!")
+
+     # Context manager to handle engine_client lifecycle
+     # Ensures everything is shut down and cleaned up on error/exit
+     engine_args = AsyncEngineArgs.from_cli_args(args)
+     if client_config:
+         engine_args._api_process_count = client_config.get("client_count", 1)
+         engine_args._api_process_rank = client_config.get("client_index", 0)
+
+     if disable_frontend_multiprocessing is None:
+         disable_frontend_multiprocessing = bool(
+             args.disable_frontend_multiprocessing)
+
+     async with build_async_engine_client_from_engine_args(
+             engine_args,
+             usage_context=usage_context,
+             disable_frontend_multiprocessing=disable_frontend_multiprocessing,
+             client_config=client_config,
+     ) as engine:
+         yield engine
+
+
+ @asynccontextmanager
+ async def build_async_engine_client_from_engine_args(
+         engine_args: AsyncEngineArgs,
+         *,
+         usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
+         disable_frontend_multiprocessing: bool = False,
+         client_config: Optional[dict[str, Any]] = None,
+ ) -> AsyncIterator[EngineClient]:
+     """
+     Create an EngineClient, either:
+         - in-process, using the AsyncLLMEngine directly
+         - multiprocess, using AsyncLLMEngine RPC
+
+     Yields the client; the engine is shut down when the context exits.
+     """
+
+     # Create the EngineConfig (determines if we can use V1).
+     vllm_config = engine_args.create_engine_config(usage_context=usage_context)
+
+     # V1 AsyncLLM.
+     assert envs.VLLM_USE_V1
+
+     if disable_frontend_multiprocessing:
+         logger.warning(
+             "V1 is enabled, but got --disable-frontend-multiprocessing. "
+             "To disable frontend multiprocessing, set VLLM_USE_V1=0.")
+
+     from vllm.v1.engine.async_llm import AsyncLLM
+     async_llm: Optional[AsyncLLM] = None
+
+     # Don't mutate the input client_config
+     client_config = dict(client_config) if client_config else {}
+     client_count = client_config.pop("client_count", 1)
+     client_index = client_config.pop("client_index", 0)
+
+     try:
+         async_llm = AsyncLLM.from_vllm_config(
+             vllm_config=vllm_config,
+             usage_context=usage_context,
+             enable_log_requests=engine_args.enable_log_requests,
+             disable_log_stats=engine_args.disable_log_stats,
+             client_addresses=client_config,
+             client_count=client_count,
+             client_index=client_index)
+
+         # Don't keep the dummy data in memory
+         await async_llm.reset_mm_cache()
+
+         yield async_llm
+     finally:
+         if async_llm:
+             async_llm.shutdown()
+
+
+ async def validate_json_request(raw_request: Request):
+     content_type = raw_request.headers.get("content-type", "").lower()
+     media_type = content_type.split(";", maxsplit=1)[0]
+     if media_type != "application/json":
+         raise RequestValidationError(errors=[
+             "Unsupported Media Type: Only 'application/json' is allowed"
+         ])
+
+
+ router = APIRouter()
+
+
+ class PrometheusResponse(Response):
+     media_type = prometheus_client.CONTENT_TYPE_LATEST
+
+
+ def mount_metrics(app: FastAPI):
+     """Mount prometheus metrics to a FastAPI app."""
+
+     registry = get_prometheus_registry()
+
+     # `response_class=PrometheusResponse` is needed to return an HTTP response
+     # with header "Content-Type: text/plain; version=0.0.4; charset=utf-8"
+     # instead of the default "application/json" which is incorrect.
+     # See https://github.com/trallnag/prometheus-fastapi-instrumentator/issues/163#issue-1296092364
+     Instrumentator(
+         excluded_handlers=[
+             "/metrics",
+             "/health",
+             "/load",
+             "/ping",
+             "/version",
+             "/server_info",
+         ],
+         registry=registry,
+     ).add().instrument(app).expose(app, response_class=PrometheusResponse)
+
+     # Add prometheus asgi middleware to route /metrics requests
+     metrics_route = Mount("/metrics", make_asgi_app(registry=registry))
+
+     # Workaround for 307 Redirect for /metrics
+     metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
+     app.routes.append(metrics_route)
+
+
+ def base(request: Request) -> OpenAIServing:
+     # Reuse the existing instance
+     return tokenization(request)
+
+
+ def models(request: Request) -> OpenAIServingModels:
+     return request.app.state.openai_serving_models
+
+
+ def responses(request: Request) -> Optional[OpenAIServingResponses]:
+     return request.app.state.openai_serving_responses
+
+
+ def chat(request: Request) -> Optional[OpenAIServingChat]:
+     return request.app.state.openai_serving_chat
+
+
+ def completion(request: Request) -> Optional[OpenAIServingCompletion]:
+     return request.app.state.openai_serving_completion
+
+
+ def pooling(request: Request) -> Optional[OpenAIServingPooling]:
+     return request.app.state.openai_serving_pooling
+
+
+ def embedding(request: Request) -> Optional[OpenAIServingEmbedding]:
+     return request.app.state.openai_serving_embedding
+
+
+ def score(request: Request) -> Optional[ServingScores]:
+     return request.app.state.openai_serving_scores
+
+
+ def classify(request: Request) -> Optional[ServingClassification]:
+     return request.app.state.openai_serving_classification
+
+
+ def rerank(request: Request) -> Optional[ServingScores]:
+     return request.app.state.openai_serving_scores
+
+
+ def tokenization(request: Request) -> OpenAIServingTokenization:
+     return request.app.state.openai_serving_tokenization
+
+
+ def transcription(request: Request) -> OpenAIServingTranscription:
+     return request.app.state.openai_serving_transcription
+
+
+ def translation(request: Request) -> OpenAIServingTranslation:
+     return request.app.state.openai_serving_translation
+
+
+ def engine_client(request: Request) -> EngineClient:
+     return request.app.state.engine_client
+
+
+ @router.get("/health", response_class=Response)
346
+ async def health(raw_request: Request) -> Response:
347
+ """Health check."""
348
+ try:
349
+ await engine_client(raw_request).check_health()
350
+ return Response(status_code=200)
351
+ except EngineDeadError:
352
+ return Response(status_code=503)
353
+
354
+
355
+ @router.get("/load")
356
+ async def get_server_load_metrics(request: Request):
357
+ # This endpoint returns the current server load metrics.
358
+ # It tracks requests utilizing the GPU from the following routes:
359
+ # - /v1/chat/completions
360
+ # - /v1/completions
361
+ # - /v1/audio/transcriptions
362
+ # - /v1/audio/translations
363
+ # - /v1/embeddings
364
+ # - /pooling
365
+ # - /classify
366
+ # - /score
367
+ # - /v1/score
368
+ # - /rerank
369
+ # - /v1/rerank
370
+ # - /v2/rerank
371
+ return JSONResponse(
372
+ content={'server_load': request.app.state.server_load_metrics})
373
+
374
+
375
+ @router.get("/ping", response_class=Response)
376
+ @router.post("/ping", response_class=Response)
377
+ async def ping(raw_request: Request) -> Response:
378
+ """Ping check. Endpoint required for SageMaker"""
379
+ return await health(raw_request)
380
+
381
+
382
+ @router.post("/tokenize",
383
+ dependencies=[Depends(validate_json_request)],
384
+ responses={
385
+ HTTPStatus.BAD_REQUEST.value: {
386
+ "model": ErrorResponse
387
+ },
388
+ HTTPStatus.NOT_FOUND.value: {
389
+ "model": ErrorResponse
390
+ },
391
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
392
+ "model": ErrorResponse
393
+ },
394
+ HTTPStatus.NOT_IMPLEMENTED.value: {
395
+ "model": ErrorResponse
396
+ },
397
+ })
398
+ @with_cancellation
399
+ async def tokenize(request: TokenizeRequest, raw_request: Request):
400
+ handler = tokenization(raw_request)
401
+
402
+ try:
403
+ generator = await handler.create_tokenize(request, raw_request)
404
+ except NotImplementedError as e:
405
+ raise HTTPException(status_code=HTTPStatus.NOT_IMPLEMENTED.value,
406
+ detail=str(e)) from e
407
+ except Exception as e:
408
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
409
+ detail=str(e)) from e
410
+
411
+ if isinstance(generator, ErrorResponse):
412
+ return JSONResponse(content=generator.model_dump(),
413
+ status_code=generator.error.code)
414
+ elif isinstance(generator, TokenizeResponse):
415
+ return JSONResponse(content=generator.model_dump())
416
+
417
+ assert_never(generator)
418
+
419
+
420
+ @router.post("/detokenize",
421
+ dependencies=[Depends(validate_json_request)],
422
+ responses={
423
+ HTTPStatus.BAD_REQUEST.value: {
424
+ "model": ErrorResponse
425
+ },
426
+ HTTPStatus.NOT_FOUND.value: {
427
+ "model": ErrorResponse
428
+ },
429
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
430
+ "model": ErrorResponse
431
+ },
432
+ })
433
+ @with_cancellation
434
+ async def detokenize(request: DetokenizeRequest, raw_request: Request):
435
+ handler = tokenization(raw_request)
436
+
437
+ try:
438
+ generator = await handler.create_detokenize(request, raw_request)
439
+ except OverflowError as e:
440
+ raise RequestValidationError(errors=[str(e)]) from e
441
+ except Exception as e:
442
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
443
+ detail=str(e)) from e
444
+
445
+ if isinstance(generator, ErrorResponse):
446
+ return JSONResponse(content=generator.model_dump(),
447
+ status_code=generator.error.code)
448
+ elif isinstance(generator, DetokenizeResponse):
449
+ return JSONResponse(content=generator.model_dump())
450
+
451
+ assert_never(generator)
452
+
453
+
454
+ def maybe_register_tokenizer_info_endpoint(args):
455
+ """Conditionally register the tokenizer info endpoint if enabled."""
456
+ if getattr(args, 'enable_tokenizer_info_endpoint', False):
457
+
458
+ @router.get("/tokenizer_info")
459
+ async def get_tokenizer_info(raw_request: Request):
460
+ """Get comprehensive tokenizer information."""
461
+ result = await tokenization(raw_request).get_tokenizer_info()
462
+ return JSONResponse(content=result.model_dump(),
463
+ status_code=result.error.code if isinstance(
464
+ result, ErrorResponse) else 200)
465
+
466
+
467
+ @router.get("/v1/models")
468
+ async def show_available_models(raw_request: Request):
469
+ handler = models(raw_request)
470
+
471
+ models_ = await handler.show_available_models()
472
+ return JSONResponse(content=models_.model_dump())
473
+
474
+
475
+ @router.get("/version")
476
+ async def show_version():
477
+ ver = {"version": VLLM_VERSION}
478
+ return JSONResponse(content=ver)
479
+
480
+
481
+ async def _convert_stream_to_sse_events(
482
+ generator: AsyncGenerator[StreamingResponsesResponse, None]
483
+ ) -> AsyncGenerator[str, None]:
484
+ """Convert the generator to a stream of events in SSE format"""
485
+ async for event in generator:
486
+ event_type = getattr(event, 'type', 'unknown')
487
+ # https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format
488
+ event_data = (f"event: {event_type}\n"
489
+ f"data: {event.model_dump_json(indent=None)}\n\n")
490
+ yield event_data
491
+
492
+
493
+ @router.post("/v1/responses",
494
+ dependencies=[Depends(validate_json_request)],
495
+ responses={
496
+ HTTPStatus.OK.value: {
497
+ "content": {
498
+ "text/event-stream": {}
499
+ }
500
+ },
501
+ HTTPStatus.BAD_REQUEST.value: {
502
+ "model": ErrorResponse
503
+ },
504
+ HTTPStatus.NOT_FOUND.value: {
505
+ "model": ErrorResponse
506
+ },
507
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
508
+ "model": ErrorResponse
509
+ },
510
+ })
511
+ @with_cancellation
512
+ async def create_responses(request: ResponsesRequest, raw_request: Request):
513
+ handler = responses(raw_request)
514
+ if handler is None:
515
+ return base(raw_request).create_error_response(
516
+ message="The model does not support Responses API")
517
+ try:
518
+ generator = await handler.create_responses(request, raw_request)
519
+ except Exception as e:
520
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
521
+ detail=str(e)) from e
522
+
523
+ if isinstance(generator, ErrorResponse):
524
+ return JSONResponse(content=generator.model_dump(),
525
+ status_code=generator.error.code)
526
+ elif isinstance(generator, ResponsesResponse):
527
+ return JSONResponse(content=generator.model_dump())
528
+
529
+ return StreamingResponse(content=_convert_stream_to_sse_events(generator),
530
+ media_type="text/event-stream")
531
+
532
+
533
+ @router.get("/v1/responses/{response_id}")
534
+ async def retrieve_responses(
535
+ response_id: str,
536
+ raw_request: Request,
537
+ starting_after: Optional[int] = None,
538
+ stream: Optional[bool] = False,
539
+ ):
540
+ handler = responses(raw_request)
541
+ if handler is None:
542
+ return base(raw_request).create_error_response(
543
+ message="The model does not support Responses API")
544
+
545
+ try:
546
+ response = await handler.retrieve_responses(
547
+ response_id,
548
+ starting_after=starting_after,
549
+ stream=stream,
550
+ )
551
+ except Exception as e:
552
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
553
+ detail=str(e)) from e
554
+
555
+ if isinstance(response, ErrorResponse):
556
+ return JSONResponse(content=response.model_dump(),
557
+ status_code=response.error.code)
558
+ elif isinstance(response, ResponsesResponse):
559
+ return JSONResponse(content=response.model_dump())
560
+ return StreamingResponse(content=_convert_stream_to_sse_events(response),
561
+ media_type="text/event-stream")
562
+
563
+
564
+ @router.post("/v1/responses/{response_id}/cancel")
565
+ async def cancel_responses(response_id: str, raw_request: Request):
566
+ handler = responses(raw_request)
567
+ if handler is None:
568
+ return base(raw_request).create_error_response(
569
+ message="The model does not support Responses API")
570
+
571
+ try:
572
+ response = await handler.cancel_responses(response_id)
573
+ except Exception as e:
574
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
575
+ detail=str(e)) from e
576
+
577
+ if isinstance(response, ErrorResponse):
578
+ return JSONResponse(content=response.model_dump(),
579
+ status_code=response.error.code)
580
+ return JSONResponse(content=response.model_dump())
581
+
582
+
583
+ @router.post("/v1/chat/completions",
584
+ dependencies=[Depends(validate_json_request)],
585
+ responses={
586
+ HTTPStatus.OK.value: {
587
+ "content": {
588
+ "text/event-stream": {}
589
+ }
590
+ },
591
+ HTTPStatus.BAD_REQUEST.value: {
592
+ "model": ErrorResponse
593
+ },
594
+ HTTPStatus.NOT_FOUND.value: {
595
+ "model": ErrorResponse
596
+ },
597
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
598
+ "model": ErrorResponse
599
+ }
600
+ })
601
+ @with_cancellation
602
+ @load_aware_call
603
+ async def create_chat_completion(request: ChatCompletionRequest,
604
+ raw_request: Request):
605
+ handler = chat(raw_request)
606
+ if handler is None:
607
+ return base(raw_request).create_error_response(
608
+ message="The model does not support Chat Completions API")
609
+ try:
610
+ generator = await handler.create_chat_completion(request, raw_request)
611
+ except Exception as e:
612
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
613
+ detail=str(e)) from e
614
+ if isinstance(generator, ErrorResponse):
615
+ return JSONResponse(content=generator.model_dump(),
616
+ status_code=generator.error.code)
617
+
618
+ elif isinstance(generator, ChatCompletionResponse):
619
+ return JSONResponse(content=generator.model_dump())
620
+
621
+ return StreamingResponse(content=generator, media_type="text/event-stream")
622
+
623
+
624
+ @router.post("/v1/completions",
625
+ dependencies=[Depends(validate_json_request)],
626
+ responses={
627
+ HTTPStatus.OK.value: {
628
+ "content": {
629
+ "text/event-stream": {}
630
+ }
631
+ },
632
+ HTTPStatus.BAD_REQUEST.value: {
633
+ "model": ErrorResponse
634
+ },
635
+ HTTPStatus.NOT_FOUND.value: {
636
+ "model": ErrorResponse
637
+ },
638
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
639
+ "model": ErrorResponse
640
+ },
641
+ })
642
+ @with_cancellation
643
+ @load_aware_call
644
+ async def create_completion(request: CompletionRequest, raw_request: Request):
645
+ handler = completion(raw_request)
646
+ if handler is None:
647
+ return base(raw_request).create_error_response(
648
+ message="The model does not support Completions API")
649
+
650
+ try:
651
+ generator = await handler.create_completion(request, raw_request)
652
+ except OverflowError as e:
653
+ raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
654
+ detail=str(e)) from e
655
+ except Exception as e:
656
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
657
+ detail=str(e)) from e
658
+
659
+ if isinstance(generator, ErrorResponse):
660
+ return JSONResponse(content=generator.model_dump(),
661
+ status_code=generator.error.code)
662
+ elif isinstance(generator, CompletionResponse):
663
+ return JSONResponse(content=generator.model_dump())
664
+
665
+ return StreamingResponse(content=generator, media_type="text/event-stream")
666
+
667
+
668
+ @router.post("/v1/embeddings",
669
+ dependencies=[Depends(validate_json_request)],
670
+ responses={
671
+ HTTPStatus.BAD_REQUEST.value: {
672
+ "model": ErrorResponse
673
+ },
674
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
675
+ "model": ErrorResponse
676
+ },
677
+ })
678
+ @with_cancellation
679
+ @load_aware_call
680
+ async def create_embedding(request: EmbeddingRequest, raw_request: Request):
681
+ handler = embedding(raw_request)
682
+ if handler is None:
683
+ return base(raw_request).create_error_response(
684
+ message="The model does not support Embeddings API")
685
+
686
+ try:
687
+ generator = await handler.create_embedding(request, raw_request)
688
+ except Exception as e:
689
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
690
+ detail=str(e)) from e
691
+
692
+ if isinstance(generator, ErrorResponse):
693
+ return JSONResponse(content=generator.model_dump(),
694
+ status_code=generator.error.code)
695
+ elif isinstance(generator, EmbeddingResponse):
696
+ return JSONResponse(content=generator.model_dump())
697
+
698
+ assert_never(generator)
699
+
700
+
701
+ @router.post("/pooling",
702
+ dependencies=[Depends(validate_json_request)],
703
+ responses={
704
+ HTTPStatus.BAD_REQUEST.value: {
705
+ "model": ErrorResponse
706
+ },
707
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
708
+ "model": ErrorResponse
709
+ },
710
+ })
711
+ @with_cancellation
712
+ @load_aware_call
713
+ async def create_pooling(request: PoolingRequest, raw_request: Request):
714
+ handler = pooling(raw_request)
715
+ if handler is None:
716
+ return base(raw_request).create_error_response(
717
+ message="The model does not support Pooling API")
718
+ try:
719
+ generator = await handler.create_pooling(request, raw_request)
720
+ except Exception as e:
721
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
722
+ detail=str(e)) from e
723
+ if isinstance(generator, ErrorResponse):
724
+ return JSONResponse(content=generator.model_dump(),
725
+ status_code=generator.error.code)
726
+ elif isinstance(generator, (PoolingResponse, IOProcessorResponse)):
727
+ return JSONResponse(content=generator.model_dump())
728
+
729
+ assert_never(generator)
730
+
731
+
732
+ @router.post("/classify", dependencies=[Depends(validate_json_request)])
733
+ @with_cancellation
734
+ @load_aware_call
735
+ async def create_classify(request: ClassificationRequest,
736
+ raw_request: Request):
737
+ handler = classify(raw_request)
738
+ if handler is None:
739
+ return base(raw_request).create_error_response(
740
+ message="The model does not support Classification API")
741
+
742
+ try:
743
+ generator = await handler.create_classify(request, raw_request)
744
+ except Exception as e:
745
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
746
+ detail=str(e)) from e
747
+ if isinstance(generator, ErrorResponse):
748
+ return JSONResponse(content=generator.model_dump(),
749
+ status_code=generator.error.code)
750
+
751
+ elif isinstance(generator, ClassificationResponse):
752
+ return JSONResponse(content=generator.model_dump())
753
+
754
+ assert_never(generator)
755
+
756
+
757
+ @router.post("/score",
758
+ dependencies=[Depends(validate_json_request)],
759
+ responses={
760
+ HTTPStatus.BAD_REQUEST.value: {
761
+ "model": ErrorResponse
762
+ },
763
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
764
+ "model": ErrorResponse
765
+ },
766
+ })
767
+ @with_cancellation
768
+ @load_aware_call
769
+ async def create_score(request: ScoreRequest, raw_request: Request):
770
+ handler = score(raw_request)
771
+ if handler is None:
772
+ return base(raw_request).create_error_response(
773
+ message="The model does not support Score API")
774
+
775
+ try:
776
+ generator = await handler.create_score(request, raw_request)
777
+ except Exception as e:
778
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
779
+ detail=str(e)) from e
780
+ if isinstance(generator, ErrorResponse):
781
+ return JSONResponse(content=generator.model_dump(),
782
+ status_code=generator.error.code)
783
+ elif isinstance(generator, ScoreResponse):
784
+ return JSONResponse(content=generator.model_dump())
785
+
786
+ assert_never(generator)
787
+
788
+
789
+ @router.post("/v1/score",
790
+ dependencies=[Depends(validate_json_request)],
791
+ responses={
792
+ HTTPStatus.BAD_REQUEST.value: {
793
+ "model": ErrorResponse
794
+ },
795
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
796
+ "model": ErrorResponse
797
+ },
798
+ })
799
+ @with_cancellation
800
+ @load_aware_call
801
+ async def create_score_v1(request: ScoreRequest, raw_request: Request):
802
+ logger.warning(
803
+ "To indicate that Score API is not part of standard OpenAI API, we "
804
+ "have moved it to `/score`. Please update your client accordingly.")
805
+
806
+ return await create_score(request, raw_request)
807
+
808
+
809
+ @router.post("/v1/audio/transcriptions",
810
+ responses={
811
+ HTTPStatus.OK.value: {
812
+ "content": {
813
+ "text/event-stream": {}
814
+ }
815
+ },
816
+ HTTPStatus.BAD_REQUEST.value: {
817
+ "model": ErrorResponse
818
+ },
819
+ HTTPStatus.UNPROCESSABLE_ENTITY.value: {
820
+ "model": ErrorResponse
821
+ },
822
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
823
+ "model": ErrorResponse
824
+ },
825
+ })
826
+ @with_cancellation
827
+ @load_aware_call
828
+ async def create_transcriptions(raw_request: Request,
829
+ request: Annotated[TranscriptionRequest,
830
+ Form()]):
831
+ handler = transcription(raw_request)
832
+ if handler is None:
833
+ return base(raw_request).create_error_response(
834
+ message="The model does not support Transcriptions API")
835
+
836
+ audio_data = await request.file.read()
837
+ try:
838
+ generator = await handler.create_transcription(audio_data, request,
839
+ raw_request)
840
+ except Exception as e:
841
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
842
+ detail=str(e)) from e
843
+
844
+ if isinstance(generator, ErrorResponse):
845
+ return JSONResponse(content=generator.model_dump(),
846
+ status_code=generator.error.code)
847
+
848
+ elif isinstance(generator, TranscriptionResponse):
849
+ return JSONResponse(content=generator.model_dump())
850
+
851
+ return StreamingResponse(content=generator, media_type="text/event-stream")
852
+
853
+
854
+ @router.post("/v1/audio/translations",
855
+ responses={
856
+ HTTPStatus.OK.value: {
857
+ "content": {
858
+ "text/event-stream": {}
859
+ }
860
+ },
861
+ HTTPStatus.BAD_REQUEST.value: {
862
+ "model": ErrorResponse
863
+ },
864
+ HTTPStatus.UNPROCESSABLE_ENTITY.value: {
865
+ "model": ErrorResponse
866
+ },
867
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
868
+ "model": ErrorResponse
869
+ },
870
+ })
871
+ @with_cancellation
872
+ @load_aware_call
873
+ async def create_translations(request: Annotated[TranslationRequest,
874
+ Form()],
875
+ raw_request: Request):
876
+ handler = translation(raw_request)
877
+ if handler is None:
878
+ return base(raw_request).create_error_response(
879
+ message="The model does not support Translations API")
880
+
881
+ audio_data = await request.file.read()
882
+ try:
883
+ generator = await handler.create_translation(audio_data, request,
884
+ raw_request)
885
+ except Exception as e:
886
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
887
+ detail=str(e)) from e
888
+
889
+ if isinstance(generator, ErrorResponse):
890
+ return JSONResponse(content=generator.model_dump(),
891
+ status_code=generator.error.code)
892
+
893
+ elif isinstance(generator, TranslationResponse):
894
+ return JSONResponse(content=generator.model_dump())
895
+
896
+ return StreamingResponse(content=generator, media_type="text/event-stream")
897
+
898
+
899
+ @router.post("/rerank",
900
+ dependencies=[Depends(validate_json_request)],
901
+ responses={
902
+ HTTPStatus.BAD_REQUEST.value: {
903
+ "model": ErrorResponse
904
+ },
905
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
906
+ "model": ErrorResponse
907
+ },
908
+ })
909
+ @with_cancellation
910
+ @load_aware_call
911
+ async def do_rerank(request: RerankRequest, raw_request: Request):
912
+ handler = rerank(raw_request)
913
+ if handler is None:
914
+ return base(raw_request).create_error_response(
915
+ message="The model does not support Rerank (Score) API")
916
+ try:
917
+ generator = await handler.do_rerank(request, raw_request)
918
+ except Exception as e:
919
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
920
+ detail=str(e)) from e
921
+ if isinstance(generator, ErrorResponse):
922
+ return JSONResponse(content=generator.model_dump(),
923
+ status_code=generator.error.code)
924
+ elif isinstance(generator, RerankResponse):
925
+ return JSONResponse(content=generator.model_dump())
926
+
927
+ assert_never(generator)
928
+
929
+
930
+ @router.post("/v1/rerank",
931
+ dependencies=[Depends(validate_json_request)],
932
+ responses={
933
+ HTTPStatus.BAD_REQUEST.value: {
934
+ "model": ErrorResponse
935
+ },
936
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
937
+ "model": ErrorResponse
938
+ },
939
+ })
940
+ @with_cancellation
941
+ async def do_rerank_v1(request: RerankRequest, raw_request: Request):
942
+ logger.warning_once(
943
+ "To indicate that the rerank API is not part of the standard OpenAI"
944
+ " API, we have located it at `/rerank`. Please update your client "
945
+ "accordingly. (Note: Conforms to JinaAI rerank API)")
946
+
947
+ return await do_rerank(request, raw_request)
948
+
949
+
950
+ @router.post("/v2/rerank",
951
+ dependencies=[Depends(validate_json_request)],
952
+ responses={
953
+ HTTPStatus.BAD_REQUEST.value: {
954
+ "model": ErrorResponse
955
+ },
956
+ HTTPStatus.INTERNAL_SERVER_ERROR.value: {
957
+ "model": ErrorResponse
958
+ },
959
+ })
960
+ @with_cancellation
961
+ async def do_rerank_v2(request: RerankRequest, raw_request: Request):
962
+ return await do_rerank(request, raw_request)
963
+
964
+
965
+ if envs.VLLM_SERVER_DEV_MODE:
+     logger.warning("SECURITY WARNING: Development endpoints are enabled! "
+                    "This should NOT be used in production!")
+
+     PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig)
+
+     @router.get("/server_info")
+     async def show_server_info(
+             raw_request: Request,
+             config_format: Annotated[Literal["text", "json"],
+                                      Query()] = "text",
+     ):
+         vllm_config: VllmConfig = raw_request.app.state.vllm_config
+         server_info = {
+             "vllm_config":
+             str(vllm_config)
+             if config_format == "text" else PydanticVllmConfig.dump_python(
+                 vllm_config, mode="json", fallback=str)
+             # fallback=str is needed to handle e.g. torch.dtype
+         }
+         return JSONResponse(content=server_info)
+
+     @router.post("/reset_prefix_cache")
+     async def reset_prefix_cache(raw_request: Request):
+         """
+         Reset the prefix cache. Note that we currently do not check if the
+         prefix cache is successfully reset in the API server.
+         """
+         device = None
+         device_str = raw_request.query_params.get("device")
+         if device_str is not None:
+             device = Device[device_str.upper()]
+         logger.info("Resetting prefix cache with specific %s...", str(device))
+         await engine_client(raw_request).reset_prefix_cache(device)
+         return Response(status_code=200)
+
+     @router.post("/sleep")
+     async def sleep(raw_request: Request):
+         # get POST params
+         level = raw_request.query_params.get("level", "1")
+         await engine_client(raw_request).sleep(int(level))
+         # FIXME: in v0 with frontend multiprocessing, the sleep command
+         # is sent but does not finish yet when we return a response.
+         return Response(status_code=200)
+
+     @router.post("/wake_up")
+     async def wake_up(raw_request: Request):
+         tags = raw_request.query_params.getlist("tags")
+         if tags == []:
+             # set to None to wake up all tags if no tags are provided
+             tags = None
+         logger.info("wake up the engine with tags: %s", tags)
+         await engine_client(raw_request).wake_up(tags)
+         # FIXME: in v0 with frontend multiprocessing, the wake-up command
+         # is sent but does not finish yet when we return a response.
+         return Response(status_code=200)
+
+     @router.get("/is_sleeping")
+     async def is_sleeping(raw_request: Request):
+         logger.info("check whether the engine is sleeping")
+         is_sleeping = await engine_client(raw_request).is_sleeping()
+         return JSONResponse(content={"is_sleeping": is_sleeping})
+
+     @router.post("/collective_rpc")
+     async def collective_rpc(raw_request: Request):
+         try:
+             body = await raw_request.json()
+         except json.JSONDecodeError as e:
+             raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
+                                 detail=f"JSON decode error: {e}") from e
+         method = body.get("method")
+         if method is None:
+             raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
+                                 detail="Missing 'method' in request body")
+         # For security reasons, only serialized string args/kwargs are passed.
+         # User-defined `method` is responsible for deserialization if needed.
+         args: list[str] = body.get("args", [])
+         kwargs: dict[str, str] = body.get("kwargs", {})
+         timeout: Optional[float] = body.get("timeout")
+         results = await engine_client(raw_request).collective_rpc(
+             method=method, timeout=timeout, args=tuple(args), kwargs=kwargs)
+         if results is None:
+             return Response(status_code=200)
+         response: list[Any] = []
+         for result in results:
+             if result is None or isinstance(result, (dict, list)):
+                 response.append(result)
+             else:
+                 response.append(str(result))
+         return JSONResponse(content={"results": response})
+
+
+ @router.post("/scale_elastic_ep",
+              dependencies=[Depends(validate_json_request)],
+              responses={
+                  HTTPStatus.OK.value: {
+                      "model": dict
+                  },
+                  HTTPStatus.BAD_REQUEST.value: {
+                      "model": ErrorResponse
+                  },
+                  HTTPStatus.REQUEST_TIMEOUT.value: {
+                      "model": ErrorResponse
+                  },
+                  HTTPStatus.INTERNAL_SERVER_ERROR.value: {
+                      "model": ErrorResponse
+                  },
+              })
+ async def scale_elastic_ep(raw_request: Request):
+     try:
+         body = await raw_request.json()
+     except json.JSONDecodeError as e:
+         raise HTTPException(status_code=400,
+                             detail="Invalid JSON format") from e  # noqa: B904
+
+     new_data_parallel_size = body.get("new_data_parallel_size")
+     drain_timeout = body.get("drain_timeout", 120)  # Default 2 minutes
+
+     if new_data_parallel_size is None:
+         raise HTTPException(status_code=400,
+                             detail="new_data_parallel_size is required")
+
+     if not isinstance(new_data_parallel_size,
+                       int) or new_data_parallel_size <= 0:
+         raise HTTPException(
+             status_code=400,
+             detail="new_data_parallel_size must be a positive integer")
+
+     if not isinstance(drain_timeout, int) or drain_timeout <= 0:
+         raise HTTPException(status_code=400,
+                             detail="drain_timeout must be a positive integer")
+
+     # Set scaling flag to prevent new requests
+     global _scaling_elastic_ep
+     _scaling_elastic_ep = True
+     client = engine_client(raw_request)
+     try:
+         await client.scale_elastic_ep(new_data_parallel_size, drain_timeout)
+         return JSONResponse({
+             "message":
+             f"Scaled to {new_data_parallel_size} "
+             "data parallel engines",
+         })
+     except TimeoutError as e:
+         raise HTTPException(status_code=408,
+                             detail="Scale failed due to request drain timeout "
+                             f"after {drain_timeout} seconds") from e
+     except Exception as e:
+         logger.error("Scale failed: %s", e)
+         raise HTTPException(status_code=500, detail="Scale failed") from e
+     finally:
+         _scaling_elastic_ep = False
+
+
+ @router.post("/is_scaling_elastic_ep")
+ async def is_scaling_elastic_ep(raw_request: Request):
+     return JSONResponse({"is_scaling_elastic_ep": _scaling_elastic_ep})
+
+
+ # TODO: RequestType = TypeForm[BaseModel] when recognized by type checkers
+ # (requires typing_extensions >= 4.13)
+ RequestType = Any
+ GetHandlerFn = Callable[[Request], Optional[OpenAIServing]]
+ EndpointFn = Callable[[RequestType, Request], Awaitable[Any]]
+
+ # NOTE: Items defined earlier take higher priority
+ INVOCATION_TYPES: list[tuple[RequestType, tuple[GetHandlerFn, EndpointFn]]] = [
+     (ChatCompletionRequest, (chat, create_chat_completion)),
+     (CompletionRequest, (completion, create_completion)),
+     (EmbeddingRequest, (embedding, create_embedding)),
+     (ClassificationRequest, (classify, create_classify)),
+     (ScoreRequest, (score, create_score)),
+     (RerankRequest, (rerank, do_rerank)),
+     (PoolingRequest, (pooling, create_pooling)),
+ ]
+
+ # NOTE: Construct the TypeAdapters only once
+ INVOCATION_VALIDATORS = [
+     (pydantic.TypeAdapter(request_type), (get_handler, endpoint))
+     for request_type, (get_handler, endpoint) in INVOCATION_TYPES
+ ]
+
+
+ @router.post("/invocations",
+              dependencies=[Depends(validate_json_request)],
+              responses={
+                  HTTPStatus.BAD_REQUEST.value: {
+                      "model": ErrorResponse
+                  },
+                  HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value: {
+                      "model": ErrorResponse
+                  },
+                  HTTPStatus.INTERNAL_SERVER_ERROR.value: {
+                      "model": ErrorResponse
+                  },
+              })
+ async def invocations(raw_request: Request):
+     """For SageMaker, routes requests based on the request type."""
+     try:
+         body = await raw_request.json()
+     except json.JSONDecodeError as e:
+         raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
+                             detail=f"JSON decode error: {e}") from e
+
+     valid_endpoints = [(validator, endpoint)
+                        for validator, (get_handler,
+                                        endpoint) in INVOCATION_VALIDATORS
+                        if get_handler(raw_request) is not None]
+
+     for request_validator, endpoint in valid_endpoints:
+         try:
+             request = request_validator.validate_python(body)
+         except pydantic.ValidationError:
+             continue
+
+         return await endpoint(request, raw_request)
+
+     type_names = [
+         t.__name__ if isinstance(t := validator._type, type) else str(t)
+         for validator, _ in valid_endpoints
+     ]
+     msg = ("Cannot find suitable handler for request. "
+            f"Expected one of: {type_names}")
+     res = base(raw_request).create_error_response(message=msg)
+     return JSONResponse(content=res.model_dump(), status_code=res.error.code)
+
+
+ if envs.VLLM_TORCH_PROFILER_DIR:
+     logger.warning(
+         "Torch Profiler is enabled in the API server. This should ONLY be "
+         "used for local development!")
+
+     @router.post("/start_profile")
+     async def start_profile(raw_request: Request):
+         logger.info("Starting profiler...")
+         await engine_client(raw_request).start_profile()
+         logger.info("Profiler started.")
+         return Response(status_code=200)
+
+     @router.post("/stop_profile")
+     async def stop_profile(raw_request: Request):
+         logger.info("Stopping profiler...")
+         await engine_client(raw_request).stop_profile()
+         logger.info("Profiler stopped.")
+         return Response(status_code=200)
+
+
+ if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
+     logger.warning(
+         "LoRA dynamic loading & unloading is enabled in the API server. "
+         "This should ONLY be used for local development!")
+
+     @router.post("/v1/load_lora_adapter",
+                  dependencies=[Depends(validate_json_request)])
+     async def load_lora_adapter(request: LoadLoRAAdapterRequest,
+                                 raw_request: Request):
+         handler = models(raw_request)
+         response = await handler.load_lora_adapter(request)
+         if isinstance(response, ErrorResponse):
+             return JSONResponse(content=response.model_dump(),
+                                 status_code=response.error.code)
+
+         return Response(status_code=200, content=response)
+
+     @router.post("/v1/unload_lora_adapter",
+                  dependencies=[Depends(validate_json_request)])
+     async def unload_lora_adapter(request: UnloadLoRAAdapterRequest,
+                                   raw_request: Request):
+         handler = models(raw_request)
+         response = await handler.unload_lora_adapter(request)
+         if isinstance(response, ErrorResponse):
+             return JSONResponse(content=response.model_dump(),
+                                 status_code=response.error.code)
+
+         return Response(status_code=200, content=response)
+
+
+ def load_log_config(log_config_file: Optional[str]) -> Optional[dict]:
+     if not log_config_file:
+         return None
+     try:
+         with open(log_config_file) as f:
+             return json.load(f)
+     except Exception as e:
+         logger.warning("Failed to load log config from file %s: error %s",
+                        log_config_file, e)
+         return None
+
+
+ class AuthenticationMiddleware:
+     """
+     Pure ASGI middleware that authenticates each request by checking
+     if the Authorization Bearer token exists and equals any of the
+     configured "{api_key}" values.
+
+     Notes
+     -----
+     There are two cases in which authentication is skipped:
+         1. The HTTP method is OPTIONS.
+         2. The request path doesn't start with /v1 (e.g. /health).
+     """
+
+     def __init__(self, app: ASGIApp, tokens: list[str]) -> None:
+         self.app = app
+         self.api_tokens = [
+             hashlib.sha256(t.encode("utf-8")).digest() for t in tokens
+         ]
+
+     def verify_token(self, headers: Headers) -> bool:
+         authorization_header_value = headers.get("Authorization")
+         if not authorization_header_value:
+             return False
+
+         scheme, _, param = authorization_header_value.partition(" ")
+         if scheme.lower() != "bearer":
+             return False
+
+         param_hash = hashlib.sha256(param.encode("utf-8")).digest()
+
+         token_match = False
+         for token_hash in self.api_tokens:
+             token_match |= secrets.compare_digest(param_hash, token_hash)
+
+         return token_match
+
+     def __call__(self, scope: Scope, receive: Receive,
+                  send: Send) -> Awaitable[None]:
+         if scope["type"] not in ("http",
+                                  "websocket") or scope["method"] == "OPTIONS":
+             # scope["type"] can be "lifespan" or "startup" for example,
+             # in which case we don't need to do anything
+             return self.app(scope, receive, send)
+         root_path = scope.get("root_path", "")
+         url_path = URL(scope=scope).path.removeprefix(root_path)
+         headers = Headers(scope=scope)
+         # Type narrow to satisfy mypy.
+         if url_path.startswith("/v1") and not self.verify_token(headers):
+             response = JSONResponse(content={"error": "Unauthorized"},
+                                     status_code=401)
+             return response(scope, receive, send)
+         return self.app(scope, receive, send)
+
+
+ class XRequestIdMiddleware:
+     """
+     Middleware that sets the X-Request-Id header for each response
+     to a random uuid4 (hex) value if the header isn't already
+     present in the request, and otherwise echoes the provided request id.
+     """
+
+     def __init__(self, app: ASGIApp) -> None:
+         self.app = app
+
+     def __call__(self, scope: Scope, receive: Receive,
+                  send: Send) -> Awaitable[None]:
+         if scope["type"] not in ("http", "websocket"):
+             return self.app(scope, receive, send)
+
+         # Extract the request headers.
+         request_headers = Headers(scope=scope)
+
+         async def send_with_request_id(message: Message) -> None:
+             """
+             Custom send function to mutate the response headers
+             and append X-Request-Id to it.
+             """
+             if message["type"] == "http.response.start":
+                 response_headers = MutableHeaders(raw=message["headers"])
+                 request_id = request_headers.get("X-Request-Id",
+                                                  uuid.uuid4().hex)
+                 response_headers.append("X-Request-Id", request_id)
+             await send(message)
+
+         return self.app(scope, receive, send_with_request_id)
+
+
+ # Global variable to track scaling state
+ _scaling_elastic_ep = False
+
+
+ class ScalingMiddleware:
+     """
+     Middleware that checks if the model is currently scaling and
+     returns a 503 Service Unavailable response if it is.
+
+     This middleware applies to all HTTP requests and prevents
+     processing when the model is in a scaling state.
+     """
+
+     def __init__(self, app: ASGIApp) -> None:
+         self.app = app
+
+     def __call__(self, scope: Scope, receive: Receive,
+                  send: Send) -> Awaitable[None]:
+         if scope["type"] != "http":
+             return self.app(scope, receive, send)
+
+         # Check global scaling state
+         global _scaling_elastic_ep
+         if _scaling_elastic_ep:
+             # Return 503 Service Unavailable response
+             response = JSONResponse(content={
+                 "error":
+                 "The model is currently scaling. Please try again later."
+             },
+                                     status_code=503)
+             return response(scope, receive, send)
+
+         return self.app(scope, receive, send)
+
+
+ def _extract_content_from_chunk(chunk_data: dict) -> str:
+     """Extract content from a streaming response chunk."""
+     try:
+         from vllm.entrypoints.openai.protocol import (
+             ChatCompletionStreamResponse, CompletionStreamResponse)
+
+         # Try using Completion types for type-safe parsing
+         if chunk_data.get('object') == 'chat.completion.chunk':
+             chat_response = ChatCompletionStreamResponse.model_validate(
+                 chunk_data)
+             if chat_response.choices and chat_response.choices[0].delta.content:
+                 return chat_response.choices[0].delta.content
+         elif chunk_data.get('object') == 'text_completion':
+             completion_response = CompletionStreamResponse.model_validate(
+                 chunk_data)
+             if completion_response.choices and completion_response.choices[
+                     0].text:
+                 return completion_response.choices[0].text
+     except pydantic.ValidationError:
+         # Fallback to manual parsing
+         if 'choices' in chunk_data and chunk_data['choices']:
+             choice = chunk_data['choices'][0]
+             if 'delta' in choice and choice['delta'].get('content'):
+                 return choice['delta']['content']
+             elif choice.get('text'):
+                 return choice['text']
+     return ""
+
+
+ class SSEDecoder:
+     """Robust Server-Sent Events decoder for streaming responses."""
+
+     def __init__(self):
+         self.buffer = ""
+         self.content_buffer = []
+
+     def decode_chunk(self, chunk: bytes) -> list[dict]:
+         """Decode a chunk of SSE data and return parsed events."""
+         import json
+
+         try:
+             chunk_str = chunk.decode('utf-8')
+         except UnicodeDecodeError:
+             # Skip malformed chunks
+             return []
+
+         self.buffer += chunk_str
+         events = []
+
+         # Process complete lines
+         while '\n' in self.buffer:
+             line, self.buffer = self.buffer.split('\n', 1)
+             line = line.rstrip('\r')  # Handle CRLF
+
+             if line.startswith('data: '):
+                 data_str = line[6:].strip()
+                 if data_str == '[DONE]':
+                     events.append({'type': 'done'})
+                 elif data_str:
+                     try:
+                         event_data = json.loads(data_str)
+                         events.append({'type': 'data', 'data': event_data})
+                     except json.JSONDecodeError:
+                         # Skip malformed JSON
+                         continue
+
+         return events
+
+     def extract_content(self, event_data: dict) -> str:
+         """Extract content from event data."""
+         return _extract_content_from_chunk(event_data)
+
+     def add_content(self, content: str) -> None:
+         """Add content to the buffer."""
+         if content:
+             self.content_buffer.append(content)
+
+     def get_complete_content(self) -> str:
+         """Get the complete buffered content."""
+         return ''.join(self.content_buffer)
+
+
+ def _log_streaming_response(response, response_body: list) -> None:
+     """Log streaming response with robust SSE parsing."""
+     from starlette.concurrency import iterate_in_threadpool
+
+     sse_decoder = SSEDecoder()
+     chunk_count = 0
+
+     def buffered_iterator():
+         nonlocal chunk_count
+
+         for chunk in response_body:
+             chunk_count += 1
+             yield chunk
+
+             # Parse SSE events from chunk
+             events = sse_decoder.decode_chunk(chunk)
+
+             for event in events:
+                 if event['type'] == 'data':
+                     content = sse_decoder.extract_content(event['data'])
+                     sse_decoder.add_content(content)
+                 elif event['type'] == 'done':
+                     # Log complete content when done
+                     full_content = sse_decoder.get_complete_content()
+                     if full_content:
+                         # Truncate if too long
+                         if len(full_content) > 2048:
+                             full_content = (full_content[:2048] +
+                                             "...[truncated]")
+                         logger.info(
+                             "response_body={streaming_complete: "
+                             "content='%s', chunks=%d}",
+                             full_content, chunk_count)
+                     else:
+                         logger.info(
+                             "response_body={streaming_complete: "
+                             "no_content, chunks=%d}",
+                             chunk_count)
+                     return
+
+     response.body_iterator = iterate_in_threadpool(buffered_iterator())
+     logger.info("response_body={streaming_started: chunks=%d}",
+                 len(response_body))
+
+
+ def _log_non_streaming_response(response_body: list) -> None:
+     """Log non-streaming response."""
+     try:
+         decoded_body = response_body[0].decode()
+         logger.info("response_body={%s}", decoded_body)
+     except UnicodeDecodeError:
+         logger.info("response_body={<binary_data>}")
+
+
+ def build_app(args: Namespace) -> FastAPI:
1512
+ if args.disable_fastapi_docs:
1513
+ app = FastAPI(openapi_url=None,
1514
+ docs_url=None,
1515
+ redoc_url=None,
1516
+ lifespan=lifespan)
1517
+ else:
1518
+ app = FastAPI(lifespan=lifespan)
1519
+ app.include_router(router)
1520
+ app.root_path = args.root_path
1521
+
1522
+ mount_metrics(app)
1523
+
1524
+ app.add_middleware(
1525
+ CORSMiddleware,
1526
+ allow_origins=args.allowed_origins,
1527
+ allow_credentials=args.allow_credentials,
1528
+ allow_methods=args.allowed_methods,
1529
+ allow_headers=args.allowed_headers,
1530
+ )
1531
+
1532
+ @app.exception_handler(HTTPException)
1533
+ async def http_exception_handler(_: Request, exc: HTTPException):
1534
+ err = ErrorResponse(
1535
+ error=ErrorInfo(message=exc.detail,
1536
+ type=HTTPStatus(exc.status_code).phrase,
1537
+ code=exc.status_code))
1538
+ return JSONResponse(err.model_dump(), status_code=exc.status_code)
1539
+
1540
+ @app.exception_handler(RequestValidationError)
1541
+ async def validation_exception_handler(_: Request,
1542
+ exc: RequestValidationError):
1543
+ exc_str = str(exc)
1544
+ errors_str = str(exc.errors())
1545
+
1546
+ if exc.errors() and errors_str and errors_str != exc_str:
1547
+ message = f"{exc_str} {errors_str}"
1548
+ else:
1549
+ message = exc_str
1550
+
1551
+ err = ErrorResponse(error=ErrorInfo(message=message,
1552
+ type=HTTPStatus.BAD_REQUEST.phrase,
1553
+ code=HTTPStatus.BAD_REQUEST))
1554
+ return JSONResponse(err.model_dump(),
1555
+ status_code=HTTPStatus.BAD_REQUEST)
1556
+
1557
+ # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY
1558
+ if tokens := [key for key in (args.api_key or [envs.VLLM_API_KEY]) if key]:
1559
+ app.add_middleware(AuthenticationMiddleware, tokens=tokens)
1560
+
1561
+ if args.enable_request_id_headers:
1562
+ app.add_middleware(XRequestIdMiddleware)
1563
+
1564
+ # Add scaling middleware to check for scaling state
1565
+ app.add_middleware(ScalingMiddleware)
1566
+
1567
+ if envs.VLLM_DEBUG_LOG_API_SERVER_RESPONSE:
1568
+ logger.warning("CAUTION: Enabling log response in the API Server. "
1569
+ "This can include sensitive information and should be "
1570
+ "avoided in production.")
1571
+
1572
+ @app.middleware("http")
1573
+ async def log_response(request: Request, call_next):
1574
+ response = await call_next(request)
1575
+ response_body = [
1576
+ section async for section in response.body_iterator
1577
+ ]
1578
+ response.body_iterator = iterate_in_threadpool(iter(response_body))
1579
+ # Check if this is a streaming response by looking at content-type
1580
+ content_type = response.headers.get("content-type", "")
1581
+ is_streaming = content_type == "text/event-stream; charset=utf-8"
1582
+
1583
+ # Log response body based on type
1584
+ if not response_body:
1585
+ logger.info("response_body={<empty>}")
1586
+ elif is_streaming:
1587
+ _log_streaming_response(response, response_body)
1588
+ else:
1589
+ _log_non_streaming_response(response_body)
1590
+ return response
1591
+
1592
+ for middleware in args.middleware:
1593
+ module_path, object_name = middleware.rsplit(".", 1)
1594
+ imported = getattr(importlib.import_module(module_path), object_name)
1595
+ if inspect.isclass(imported):
1596
+ app.add_middleware(imported) # type: ignore[arg-type]
1597
+ elif inspect.iscoroutinefunction(imported):
1598
+ app.middleware("http")(imported)
1599
+ else:
1600
+ raise ValueError(f"Invalid middleware {middleware}. "
1601
+ f"Must be a function or a class.")
+
+     return app
+
+
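+ # A minimal sketch of exercising build_app() in isolation, e.g. in a test.
+ # The Namespace fields mirror the CLI attributes read above; the values and
+ # the use of FastAPI's TestClient are illustrative assumptions:
+ #
+ #     from argparse import Namespace
+ #     from fastapi.testclient import TestClient
+ #
+ #     args = Namespace(disable_fastapi_docs=True, root_path="",
+ #                      allowed_origins=["*"], allow_credentials=False,
+ #                      allowed_methods=["*"], allowed_headers=["*"],
+ #                      api_key=None, enable_request_id_headers=False,
+ #                      middleware=[])
+ #     client = TestClient(build_app(args))
+
+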
+ async def init_app_state(
+     engine_client: EngineClient,
+     vllm_config: VllmConfig,
+     state: State,
+     args: Namespace,
+ ) -> None:
+     if args.served_model_name is not None:
+         served_model_names = args.served_model_name
+     else:
+         served_model_names = [args.model]
+
+     if args.enable_log_requests:
+         request_logger = RequestLogger(max_log_len=args.max_log_len)
+     else:
+         request_logger = None
+
+     base_model_paths = [
+         BaseModelPath(name=name, model_path=args.model)
+         for name in served_model_names
+     ]
+
+     state.engine_client = engine_client
+     state.log_stats = not args.disable_log_stats
+     state.vllm_config = vllm_config
+     model_config = vllm_config.model_config
+
+     supported_tasks = await engine_client.get_supported_tasks()
+
+     logger.info("Supported tasks: %s", supported_tasks)
+
+     resolved_chat_template = load_chat_template(args.chat_template)
+     if resolved_chat_template is not None:
+         # Get the tokenizer to check the official template
+         tokenizer = await engine_client.get_tokenizer()
+
+         if isinstance(tokenizer, MistralTokenizer):
+             # The warning is logged in resolve_mistral_chat_template.
+             resolved_chat_template = resolve_mistral_chat_template(
+                 chat_template=resolved_chat_template)
+         else:
+             hf_chat_template = resolve_hf_chat_template(
+                 tokenizer=tokenizer,
+                 chat_template=None,
+                 tools=None,
+                 model_config=vllm_config.model_config,
+             )
+
+             if hf_chat_template != resolved_chat_template:
+                 logger.warning(
+                     "Using supplied chat template: %s\n"
+                     "It is different from the official chat template '%s'. "
+                     "This discrepancy may lead to performance degradation.",
+                     resolved_chat_template, args.model)
+
+     if args.tool_server == "demo":
+         tool_server: Optional[ToolServer] = DemoToolServer()
+         assert isinstance(tool_server, DemoToolServer)
+         await tool_server.init_and_validate()
+     elif args.tool_server:
+         tool_server = MCPToolServer()
+         await tool_server.add_tool_server(args.tool_server)
+     else:
+         tool_server = None
+
+     # Merge default_mm_loras into the static lora_modules
+     default_mm_loras = (vllm_config.lora_config.default_mm_loras
+                         if vllm_config.lora_config is not None else {})
+
+     lora_modules = args.lora_modules
+     if default_mm_loras:
+         default_mm_lora_paths = [
+             LoRAModulePath(
+                 name=modality,
+                 path=lora_path,
+             ) for modality, lora_path in default_mm_loras.items()
+         ]
+         if args.lora_modules is None:
+             lora_modules = default_mm_lora_paths
+         else:
+             lora_modules += default_mm_lora_paths
+
+     state.openai_serving_models = OpenAIServingModels(
+         engine_client=engine_client,
+         model_config=model_config,
+         base_model_paths=base_model_paths,
+         lora_modules=lora_modules,
+     )
+     await state.openai_serving_models.init_static_loras()
+     state.openai_serving_responses = OpenAIServingResponses(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+         enable_auto_tools=args.enable_auto_tool_choice,
+         tool_parser=args.tool_call_parser,
+         tool_server=tool_server,
+         reasoning_parser=args.structured_outputs_config.reasoning_parser,
+         enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+         enable_force_include_usage=args.enable_force_include_usage,
+         enable_log_outputs=args.enable_log_outputs,
+         log_error_stack=args.log_error_stack,
+     ) if "generate" in supported_tasks else None
+     state.openai_serving_chat = OpenAIServingChat(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         args.response_role,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         trust_request_chat_template=args.trust_request_chat_template,
+         return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+         enable_auto_tools=args.enable_auto_tool_choice,
+         exclude_tools_when_tool_choice_none=(
+             args.exclude_tools_when_tool_choice_none),
+         tool_parser=args.tool_call_parser,
+         reasoning_parser=args.structured_outputs_config.reasoning_parser,
+         enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+         enable_force_include_usage=args.enable_force_include_usage,
+         enable_log_outputs=args.enable_log_outputs,
+         log_error_stack=args.log_error_stack,
+     ) if "generate" in supported_tasks else None
+     state.openai_serving_completion = OpenAIServingCompletion(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+         enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+         enable_force_include_usage=args.enable_force_include_usage,
+         log_error_stack=args.log_error_stack,
+     ) if "generate" in supported_tasks else None
+     state.openai_serving_pooling = OpenAIServingPooling(
+         engine_client,
+         vllm_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         log_error_stack=args.log_error_stack,
+     ) if "encode" in supported_tasks else None
+     state.openai_serving_embedding = OpenAIServingEmbedding(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         log_error_stack=args.log_error_stack,
+     ) if "embed" in supported_tasks else None
+     state.openai_serving_classification = ServingClassification(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         log_error_stack=args.log_error_stack,
+     ) if "classify" in supported_tasks else None
+     state.openai_serving_scores = ServingScores(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         log_error_stack=args.log_error_stack,
+     ) if ("embed" in supported_tasks or "score" in supported_tasks) else None
+     state.openai_serving_tokenization = OpenAIServingTokenization(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         log_error_stack=args.log_error_stack,
+     )
+     state.openai_serving_transcription = OpenAIServingTranscription(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         log_error_stack=args.log_error_stack,
+     ) if "transcription" in supported_tasks else None
+     state.openai_serving_translation = OpenAIServingTranslation(
+         engine_client,
+         model_config,
+         state.openai_serving_models,
+         request_logger=request_logger,
+         log_error_stack=args.log_error_stack,
+     ) if "transcription" in supported_tasks else None
+
+     state.enable_server_load_tracking = args.enable_server_load_tracking
+     state.server_load_metrics = 0
+
+
+ def create_server_socket(addr: tuple[str, int]) -> socket.socket:
+     family = socket.AF_INET
+     if is_valid_ipv6_address(addr[0]):
+         family = socket.AF_INET6
+
+     sock = socket.socket(family=family, type=socket.SOCK_STREAM)
+     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+     sock.bind(addr)
+
+     return sock
+
+
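+ # For example, create_server_socket(("0.0.0.0", 8000)) yields a bound TCP
+ # socket that is later handed to uvicorn via serve_http(..., sock=sock);
+ # SO_REUSEADDR/SO_REUSEPORT allow the port to be rebound quickly across
+ # restarts.
+
+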
+ def create_server_unix_socket(path: str) -> socket.socket:
+     sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+     sock.bind(path)
+     return sock
+
+
+ def validate_api_server_args(args):
+     valid_tool_parses = ToolParserManager.tool_parsers.keys()
+     if args.enable_auto_tool_choice \
+             and args.tool_call_parser not in valid_tool_parses:
+         raise KeyError(f"invalid tool call parser: {args.tool_call_parser} "
+                        f"(choose from {{ {','.join(valid_tool_parses)} }})")
+
+     valid_reasoning_parses = ReasoningParserManager.reasoning_parsers.keys()
+     if ((reasoning_parser := args.structured_outputs_config.reasoning_parser)
+             and reasoning_parser not in valid_reasoning_parses):
+         raise KeyError(
+             f"invalid reasoning parser: {reasoning_parser} "
+             f"(choose from {{ {','.join(valid_reasoning_parses)} }})")
+
+
+ def setup_server(args):
+     """Validate API server args, set up the signal handler, and create a
+     socket ready to serve."""
+
+     logger.info("vLLM API server version %s", VLLM_VERSION)
+     log_non_default_args(args)
+
+     if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
+         ToolParserManager.import_tool_parser(args.tool_parser_plugin)
+
+     validate_api_server_args(args)
+
+     # Workaround to make sure that we bind the port before the engine is set
+     # up. This avoids race conditions with ray.
+     # See https://github.com/vllm-project/vllm/issues/8204
+     if args.uds:
+         sock = create_server_unix_socket(args.uds)
+     else:
+         sock_addr = (args.host or "", args.port)
+         sock = create_server_socket(sock_addr)
+
+     # Workaround to avoid footguns where uvicorn drops requests with too
+     # many concurrent requests active
+     set_ulimit()
+
+     def signal_handler(*_) -> None:
+         # Interrupt the server on SIGTERM while initializing
+         raise KeyboardInterrupt("terminated")
+
+     signal.signal(signal.SIGTERM, signal_handler)
+
+     if args.uds:
+         listen_address = f"unix:{args.uds}"
+     else:
+         addr, port = sock_addr
+         is_ssl = args.ssl_keyfile and args.ssl_certfile
+         host_part = (f"[{addr}]" if is_valid_ipv6_address(addr)
+                      else addr or "0.0.0.0")
+         listen_address = f"http{'s' if is_ssl else ''}://{host_part}:{port}"
+     return listen_address, sock
+
+
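+ # The returned listen_address is informational only, e.g. "unix:/tmp/x.sock"
+ # for a UNIX socket or "http://0.0.0.0:8000" for a TCP bind; the pre-bound
+ # socket is what uvicorn actually serves on.
+
+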
+ async def run_server(args, **uvicorn_kwargs) -> None:
+     """Run a single-worker API server."""
+
+     # Add a process-specific prefix to stdout and stderr.
+     decorate_logs("APIServer")
+
+     listen_address, sock = setup_server(args)
+     await run_server_worker(listen_address, sock, args, **uvicorn_kwargs)
+
+
+ async def run_server_worker(listen_address,
+                             sock,
+                             args,
+                             client_config=None,
+                             **uvicorn_kwargs) -> None:
+     """Run a single API server worker."""
+
+     if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
+         ToolParserManager.import_tool_parser(args.tool_parser_plugin)
+
+     # Load the logging config for uvicorn if specified
+     log_config = load_log_config(args.log_config_file)
+     if log_config is not None:
+         uvicorn_kwargs['log_config'] = log_config
+
+     async with build_async_engine_client(
+             args,
+             client_config=client_config,
+     ) as engine_client:
+         maybe_register_tokenizer_info_endpoint(args)
+         app = build_app(args)
+
+         vllm_config = await engine_client.get_vllm_config()
+         await init_app_state(engine_client, vllm_config, app.state, args)
+
+         logger.info("Starting vLLM API server %d on %s",
+                     vllm_config.parallel_config._api_process_rank,
+                     listen_address)
+         shutdown_task = await serve_http(
+             app,
+             sock=sock,
+             enable_ssl_refresh=args.enable_ssl_refresh,
+             host=args.host,
+             port=args.port,
+             log_level=args.uvicorn_log_level,
+             # NOTE: When the 'disable_uvicorn_access_log' value is True,
+             # no access log will be output.
+             access_log=not args.disable_uvicorn_access_log,
+             timeout_keep_alive=envs.VLLM_HTTP_TIMEOUT_KEEP_ALIVE,
+             ssl_keyfile=args.ssl_keyfile,
+             ssl_certfile=args.ssl_certfile,
+             ssl_ca_certs=args.ssl_ca_certs,
+             ssl_cert_reqs=args.ssl_cert_reqs,
+             h11_max_incomplete_event_size=args.h11_max_incomplete_event_size,
+             h11_max_header_count=args.h11_max_header_count,
+             **uvicorn_kwargs,
+         )
+
+     # NB: Await server shutdown only after the backend context is exited
+     try:
+         await shutdown_task
+     finally:
+         sock.close()
+
+
+ if __name__ == "__main__":
+     # NOTE(simon):
+     # This section should be in sync with vllm/entrypoints/cli/main.py for
+     # CLI entrypoints.
+     cli_env_setup()
+     parser = FlexibleArgumentParser(
+         description="vLLM OpenAI-Compatible RESTful API server.")
+     parser = make_arg_parser(parser)
+     args = parser.parse_args()
+     validate_parsed_serve_args(args)
+
+     uvloop.run(run_server(args))
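+
+ # Assuming this file is vllm/entrypoints/openai/api_server.py (consistent
+ # with the CLI note above), the server can also be launched directly, e.g.:
+ #
+ #     python -m vllm.entrypoints.openai.api_server \
+ #         --model <model-name> --host 0.0.0.0 --port 8000
+ #
+ # <model-name> is a placeholder; in normal use the same server is started
+ # through the `vllm serve` CLI entrypoint.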