vllm-cpu-avx512vnni 0.10.2.post2 (vllm_cpu_avx512vnni-0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl)
This diff shows the contents of publicly released package versions as they appear in the supported public registries. It is provided for informational purposes only and reflects the changes between those versions.
Potentially problematic release.
This version of vllm-cpu-avx512vnni might be problematic.
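Since every entry below is counted as "+N -0", the listing is effectively the wheel's full manifest: all files are new in this release. As a minimal sketch of how such a listing can be reproduced locally, the snippet below enumerates the archive with Python's standard zipfile module (wheels are ordinary zip archives). The filename is reconstructed from the header above, and the wheel is assumed to have already been downloaded into the working directory.

```python
# Minimal sketch: list the files inside a downloaded wheel.
# Wheels are zip archives, so the standard library suffices.
from zipfile import ZipFile

# Assumed filename, reconstructed from the header above (PEP 427 naming).
WHEEL = "vllm_cpu_avx512vnni-0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

with ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        # file_size is the uncompressed size in bytes; the diff below
        # reports added/removed lines per file instead.
        print(f"{info.filename}  ({info.file_size} bytes)")
```

Note that a listing produced this way shows archive paths and byte sizes, whereas the registry diff below reports per-file line counts; the set of paths should match.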
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +220 -0
- vllm/_bc_linter.py +59 -0
- vllm/_custom_ops.py +2022 -0
- vllm/_ipex_ops.py +404 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +50 -0
- vllm/assets/video.py +138 -0
- vllm/attention/__init__.py +19 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +348 -0
- vllm/attention/backends/differential_flash_attn.py +935 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
- vllm/attention/backends/flash_attn.py +933 -0
- vllm/attention/backends/flashmla.py +238 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1310 -0
- vllm/attention/backends/placeholder_attn.py +340 -0
- vllm/attention/backends/rocm_aiter_mla.py +410 -0
- vllm/attention/backends/rocm_flash_attn.py +953 -0
- vllm/attention/backends/triton_mla.py +111 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +805 -0
- vllm/attention/layer.py +552 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +91 -0
- vllm/attention/layers/cross_attention.py +159 -0
- vllm/attention/layers/encoder_only_attention.py +86 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
- vllm/attention/ops/common.py +139 -0
- vllm/attention/ops/flashmla.py +123 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/paged_attn.py +261 -0
- vllm/attention/ops/pallas_kv_cache_update.py +124 -0
- vllm/attention/ops/prefix_prefill.py +928 -0
- vllm/attention/ops/rocm_aiter_mla.py +104 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +676 -0
- vllm/attention/ops/triton_flash_attention.py +984 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +854 -0
- vllm/attention/selector.py +243 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +85 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +2651 -0
- vllm/benchmarks/latency.py +170 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +510 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +80 -0
- vllm/benchmarks/serve.py +1247 -0
- vllm/benchmarks/throughput.py +696 -0
- vllm/collect_env.py +823 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +193 -0
- vllm/compilation/backends.py +641 -0
- vllm/compilation/base_static_graph.py +51 -0
- vllm/compilation/collective_fusion.py +1190 -0
- vllm/compilation/compiler_interface.py +572 -0
- vllm/compilation/counter.py +47 -0
- vllm/compilation/cuda_graph.py +193 -0
- vllm/compilation/cuda_piecewise_backend.py +117 -0
- vllm/compilation/decorators.py +316 -0
- vllm/compilation/fix_functionalization.py +208 -0
- vllm/compilation/fusion.py +600 -0
- vllm/compilation/fusion_attn.py +303 -0
- vllm/compilation/fx_utils.py +84 -0
- vllm/compilation/inductor_pass.py +136 -0
- vllm/compilation/monitor.py +57 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +165 -0
- vllm/compilation/pass_manager.py +88 -0
- vllm/compilation/sequence_parallelism.py +484 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +50 -0
- vllm/compilation/wrapper.py +138 -0
- vllm/config/__init__.py +3921 -0
- vllm/config/cache.py +214 -0
- vllm/config/compilation.py +580 -0
- vllm/config/kv_events.py +50 -0
- vllm/config/kv_transfer.py +111 -0
- vllm/config/load.py +113 -0
- vllm/config/lora.py +132 -0
- vllm/config/parallel.py +446 -0
- vllm/config/scheduler.py +304 -0
- vllm/config/utils.py +29 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +439 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +523 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +139 -0
- vllm/core/placeholder_block_space_manager.py +103 -0
- vllm/core/scheduler.py +2028 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +286 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +259 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
- vllm/distributed/device_communicators/base_device_communicator.py +277 -0
- vllm/distributed/device_communicators/cpu_communicator.py +201 -0
- vllm/distributed/device_communicators/cuda_communicator.py +294 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
- vllm/distributed/device_communicators/pynccl.py +290 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
- vllm/distributed/device_communicators/ray_communicator.py +258 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/symm_mem.py +136 -0
- vllm/distributed/device_communicators/tpu_communicator.py +102 -0
- vllm/distributed/device_communicators/xpu_communicator.py +69 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +619 -0
- vllm/distributed/eplb/rebalance_algo.py +234 -0
- vllm/distributed/eplb/rebalance_execute.py +424 -0
- vllm/distributed/kv_events.py +362 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +13 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
- vllm/distributed/parallel_state.py +1489 -0
- vllm/distributed/tpu_distributed_utils.py +178 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1857 -0
- vllm/engine/async_llm_engine.py +1044 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +1849 -0
- vllm/engine/metrics.py +577 -0
- vllm/engine/metrics_types.py +84 -0
- vllm/engine/multiprocessing/__init__.py +145 -0
- vllm/engine/multiprocessing/client.py +643 -0
- vllm/engine/multiprocessing/engine.py +470 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +61 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +343 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1535 -0
- vllm/entrypoints/cli/__init__.py +12 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +58 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +36 -0
- vllm/entrypoints/cli/main.py +60 -0
- vllm/entrypoints/cli/openai.py +214 -0
- vllm/entrypoints/cli/run_batch.py +69 -0
- vllm/entrypoints/cli/serve.py +232 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +444 -0
- vllm/entrypoints/harmony_utils.py +431 -0
- vllm/entrypoints/launcher.py +168 -0
- vllm/entrypoints/llm.py +1579 -0
- vllm/entrypoints/logger.py +79 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +2011 -0
- vllm/entrypoints/openai/cli_args.py +281 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +2590 -0
- vllm/entrypoints/openai/run_batch.py +497 -0
- vllm/entrypoints/openai/serving_chat.py +1591 -0
- vllm/entrypoints/openai/serving_classification.py +176 -0
- vllm/entrypoints/openai/serving_completion.py +688 -0
- vllm/entrypoints/openai/serving_embedding.py +632 -0
- vllm/entrypoints/openai/serving_engine.py +996 -0
- vllm/entrypoints/openai/serving_models.py +288 -0
- vllm/entrypoints/openai/serving_pooling.py +277 -0
- vllm/entrypoints/openai/serving_responses.py +1690 -0
- vllm/entrypoints/openai/serving_score.py +479 -0
- vllm/entrypoints/openai/serving_tokenization.py +196 -0
- vllm/entrypoints/openai/serving_transcription.py +136 -0
- vllm/entrypoints/openai/speech_to_text.py +388 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
- vllm/entrypoints/renderer.py +395 -0
- vllm/entrypoints/score_utils.py +232 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/tool.py +139 -0
- vllm/entrypoints/tool_server.py +195 -0
- vllm/entrypoints/utils.py +328 -0
- vllm/env_override.py +23 -0
- vllm/envs.py +1354 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +378 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +35 -0
- vllm/executor/multiproc_worker_utils.py +279 -0
- vllm/executor/ray_distributed_executor.py +699 -0
- vllm/executor/ray_utils.py +410 -0
- vllm/executor/uniproc_executor.py +152 -0
- vllm/forward_context.py +273 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +356 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +973 -0
- vllm/inputs/registry.py +251 -0
- vllm/logger.py +229 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +81 -0
- vllm/logging_utils/formatter.py +79 -0
- vllm/logits_process.py +119 -0
- vllm/logprobs.py +28 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +34 -0
- vllm/lora/layers/base.py +69 -0
- vllm/lora/layers/base_linear.py +184 -0
- vllm/lora/layers/column_parallel_linear.py +622 -0
- vllm/lora/layers/logits_processor.py +247 -0
- vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
- vllm/lora/layers/replicated_linear.py +61 -0
- vllm/lora/layers/row_parallel_linear.py +201 -0
- vllm/lora/layers/utils.py +60 -0
- vllm/lora/layers/vocal_parallel_embedding.py +172 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +792 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +7 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
- vllm/lora/ops/triton_ops/utils.py +126 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +127 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +458 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +279 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +391 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +136 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +246 -0
- vllm/lora/worker_manager.py +256 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +575 -0
- vllm/model_executor/layers/attention_layer_base.py +23 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +225 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
- vllm/model_executor/layers/fla/ops/index.py +39 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
- vllm/model_executor/layers/fla/ops/op.py +39 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
- vllm/model_executor/layers/fla/ops/utils.py +180 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
- vllm/model_executor/layers/fused_moe/__init__.py +80 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
- vllm/model_executor/layers/fused_moe/config.py +497 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
- vllm/model_executor/layers/fused_moe/layer.py +1951 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
- vllm/model_executor/layers/fused_moe/utils.py +270 -0
- vllm/model_executor/layers/layernorm.py +381 -0
- vllm/model_executor/layers/lightning_attn.py +661 -0
- vllm/model_executor/layers/linear.py +1567 -0
- vllm/model_executor/layers/logits_processor.py +199 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +45 -0
- vllm/model_executor/layers/mamba/linear_attn.py +432 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
- vllm/model_executor/layers/mamba/short_conv.py +270 -0
- vllm/model_executor/layers/mla.py +158 -0
- vllm/model_executor/layers/pooler.py +732 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/auto_round.py +388 -0
- vllm/model_executor/layers/quantization/awq.py +228 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +164 -0
- vllm/model_executor/layers/quantization/bitblas.py +464 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepgemm.py +81 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
- vllm/model_executor/layers/quantization/experts_int8.py +215 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +1179 -0
- vllm/model_executor/layers/quantization/gguf.py +597 -0
- vllm/model_executor/layers/quantization/gptq.py +300 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
- vllm/model_executor/layers/quantization/inc.py +61 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/modelopt.py +1548 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
- vllm/model_executor/layers/quantization/mxfp4.py +951 -0
- vllm/model_executor/layers/quantization/petit.py +306 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +431 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/rtn.py +456 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +214 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
- vllm/model_executor/layers/rotary_embedding/base.py +156 -0
- vllm/model_executor/layers/rotary_embedding/common.py +105 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
- vllm/model_executor/layers/sampler.py +1198 -0
- vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
- vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
- vllm/model_executor/layers/utils.py +196 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +138 -0
- vllm/model_executor/model_loader/base_loader.py +52 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
- vllm/model_executor/model_loader/default_loader.py +278 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +155 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
- vllm/model_executor/model_loader/tensorizer.py +743 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
- vllm/model_executor/model_loader/tpu.py +114 -0
- vllm/model_executor/model_loader/utils.py +271 -0
- vllm/model_executor/model_loader/weight_utils.py +946 -0
- vllm/model_executor/models/__init__.py +30 -0
- vllm/model_executor/models/adapters.py +542 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/apertus.py +582 -0
- vllm/model_executor/models/arcee.py +423 -0
- vllm/model_executor/models/arctic.py +560 -0
- vllm/model_executor/models/aria.py +662 -0
- vllm/model_executor/models/aya_vision.py +470 -0
- vllm/model_executor/models/baichuan.py +475 -0
- vllm/model_executor/models/bailing_moe.py +529 -0
- vllm/model_executor/models/bamba.py +582 -0
- vllm/model_executor/models/bart.py +1343 -0
- vllm/model_executor/models/bert.py +613 -0
- vllm/model_executor/models/bert_with_rope.py +687 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +716 -0
- vllm/model_executor/models/bloom.py +374 -0
- vllm/model_executor/models/chameleon.py +1141 -0
- vllm/model_executor/models/chatglm.py +479 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/cohere2_vision.py +484 -0
- vllm/model_executor/models/commandr.py +467 -0
- vllm/model_executor/models/config.py +434 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +473 -0
- vllm/model_executor/models/deepseek.py +491 -0
- vllm/model_executor/models/deepseek_eagle.py +241 -0
- vllm/model_executor/models/deepseek_mtp.py +282 -0
- vllm/model_executor/models/deepseek_v2.py +1058 -0
- vllm/model_executor/models/deepseek_vl2.py +661 -0
- vllm/model_executor/models/donut.py +387 -0
- vllm/model_executor/models/dots1.py +547 -0
- vllm/model_executor/models/ernie45.py +43 -0
- vllm/model_executor/models/ernie45_moe.py +608 -0
- vllm/model_executor/models/ernie45_vl.py +1510 -0
- vllm/model_executor/models/ernie45_vl_moe.py +728 -0
- vllm/model_executor/models/ernie_mtp.py +287 -0
- vllm/model_executor/models/exaone.py +552 -0
- vllm/model_executor/models/exaone4.py +535 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +511 -0
- vllm/model_executor/models/falcon_h1.py +739 -0
- vllm/model_executor/models/florence2.py +1107 -0
- vllm/model_executor/models/fuyu.py +401 -0
- vllm/model_executor/models/gemma.py +428 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +542 -0
- vllm/model_executor/models/gemma3_mm.py +723 -0
- vllm/model_executor/models/gemma3n.py +830 -0
- vllm/model_executor/models/gemma3n_mm.py +767 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4_1v.py +1669 -0
- vllm/model_executor/models/glm4_moe.py +703 -0
- vllm/model_executor/models/glm4_moe_mtp.py +306 -0
- vllm/model_executor/models/glm4v.py +654 -0
- vllm/model_executor/models/gpt2.py +383 -0
- vllm/model_executor/models/gpt_bigcode.py +346 -0
- vllm/model_executor/models/gpt_j.py +340 -0
- vllm/model_executor/models/gpt_neox.py +333 -0
- vllm/model_executor/models/gpt_oss.py +687 -0
- vllm/model_executor/models/granite.py +498 -0
- vllm/model_executor/models/granite_speech.py +799 -0
- vllm/model_executor/models/granitemoe.py +541 -0
- vllm/model_executor/models/granitemoehybrid.py +684 -0
- vllm/model_executor/models/granitemoeshared.py +342 -0
- vllm/model_executor/models/gritlm.py +262 -0
- vllm/model_executor/models/grok1.py +550 -0
- vllm/model_executor/models/h2ovl.py +536 -0
- vllm/model_executor/models/hunyuan_v1.py +937 -0
- vllm/model_executor/models/hyperclovax_vision.py +1206 -0
- vllm/model_executor/models/idefics2_vision_model.py +416 -0
- vllm/model_executor/models/idefics3.py +758 -0
- vllm/model_executor/models/interfaces.py +854 -0
- vllm/model_executor/models/interfaces_base.py +195 -0
- vllm/model_executor/models/intern_vit.py +481 -0
- vllm/model_executor/models/internlm2.py +453 -0
- vllm/model_executor/models/internlm2_ve.py +148 -0
- vllm/model_executor/models/interns1.py +832 -0
- vllm/model_executor/models/interns1_vit.py +418 -0
- vllm/model_executor/models/internvl.py +1423 -0
- vllm/model_executor/models/jais.py +374 -0
- vllm/model_executor/models/jamba.py +630 -0
- vllm/model_executor/models/jina_vl.py +144 -0
- vllm/model_executor/models/keye.py +1684 -0
- vllm/model_executor/models/keye_vl1_5.py +601 -0
- vllm/model_executor/models/kimi_vl.py +620 -0
- vllm/model_executor/models/lfm2.py +558 -0
- vllm/model_executor/models/llama.py +671 -0
- vllm/model_executor/models/llama4.py +732 -0
- vllm/model_executor/models/llama4_eagle.py +241 -0
- vllm/model_executor/models/llama_eagle.py +171 -0
- vllm/model_executor/models/llama_eagle3.py +292 -0
- vllm/model_executor/models/llava.py +872 -0
- vllm/model_executor/models/llava_next.py +572 -0
- vllm/model_executor/models/llava_next_video.py +479 -0
- vllm/model_executor/models/llava_onevision.py +945 -0
- vllm/model_executor/models/mamba.py +310 -0
- vllm/model_executor/models/mamba2.py +346 -0
- vllm/model_executor/models/mamba_cache.py +83 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/midashenglm.py +788 -0
- vllm/model_executor/models/mimo.py +191 -0
- vllm/model_executor/models/mimo_mtp.py +273 -0
- vllm/model_executor/models/minicpm.py +593 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +804 -0
- vllm/model_executor/models/minicpmv.py +1786 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1027 -0
- vllm/model_executor/models/minimax_vl_01.py +431 -0
- vllm/model_executor/models/mistral3.py +628 -0
- vllm/model_executor/models/mixtral.py +494 -0
- vllm/model_executor/models/mllama.py +1697 -0
- vllm/model_executor/models/mllama4.py +1079 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +374 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1569 -0
- vllm/model_executor/models/moonvit.py +663 -0
- vllm/model_executor/models/motif.py +345 -0
- vllm/model_executor/models/mpt.py +332 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
- vllm/model_executor/models/nemotron.py +509 -0
- vllm/model_executor/models/nemotron_h.py +633 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nemotron_vl.py +655 -0
- vllm/model_executor/models/nvlm_d.py +203 -0
- vllm/model_executor/models/olmo.py +406 -0
- vllm/model_executor/models/olmo2.py +428 -0
- vllm/model_executor/models/olmoe.py +485 -0
- vllm/model_executor/models/opt.py +413 -0
- vllm/model_executor/models/orion.py +350 -0
- vllm/model_executor/models/ovis.py +572 -0
- vllm/model_executor/models/ovis2_5.py +644 -0
- vllm/model_executor/models/paligemma.py +414 -0
- vllm/model_executor/models/persimmon.py +345 -0
- vllm/model_executor/models/phi.py +357 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3v.py +701 -0
- vllm/model_executor/models/phi4_multimodal.py +1478 -0
- vllm/model_executor/models/phi4flash.py +737 -0
- vllm/model_executor/models/phi4mm.py +1281 -0
- vllm/model_executor/models/phi4mm_audio.py +1254 -0
- vllm/model_executor/models/phi4mm_utils.py +1875 -0
- vllm/model_executor/models/phimoe.py +681 -0
- vllm/model_executor/models/pixtral.py +1348 -0
- vllm/model_executor/models/plamo2.py +1126 -0
- vllm/model_executor/models/qwen.py +363 -0
- vllm/model_executor/models/qwen2.py +526 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
- vllm/model_executor/models/qwen2_5_vl.py +1256 -0
- vllm/model_executor/models/qwen2_audio.py +492 -0
- vllm/model_executor/models/qwen2_moe.py +558 -0
- vllm/model_executor/models/qwen2_rm.py +122 -0
- vllm/model_executor/models/qwen2_vl.py +1512 -0
- vllm/model_executor/models/qwen3.py +344 -0
- vllm/model_executor/models/qwen3_moe.py +704 -0
- vllm/model_executor/models/qwen3_next.py +1298 -0
- vllm/model_executor/models/qwen3_next_mtp.py +285 -0
- vllm/model_executor/models/qwen_vl.py +795 -0
- vllm/model_executor/models/registry.py +891 -0
- vllm/model_executor/models/roberta.py +252 -0
- vllm/model_executor/models/rvl.py +103 -0
- vllm/model_executor/models/seed_oss.py +488 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/siglip2navit.py +688 -0
- vllm/model_executor/models/skyworkr1v.py +914 -0
- vllm/model_executor/models/smolvlm.py +44 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +344 -0
- vllm/model_executor/models/starcoder2.py +357 -0
- vllm/model_executor/models/step3_text.py +521 -0
- vllm/model_executor/models/step3_vl.py +1091 -0
- vllm/model_executor/models/swin.py +475 -0
- vllm/model_executor/models/tarsier.py +649 -0
- vllm/model_executor/models/telechat2.py +151 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/terratorch.py +294 -0
- vllm/model_executor/models/transformers.py +883 -0
- vllm/model_executor/models/ultravox.py +667 -0
- vllm/model_executor/models/utils.py +770 -0
- vllm/model_executor/models/vision.py +125 -0
- vllm/model_executor/models/voxtral.py +789 -0
- vllm/model_executor/models/whisper.py +966 -0
- vllm/model_executor/models/zamba2.py +1056 -0
- vllm/model_executor/parameter.py +599 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +97 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
- vllm/model_executor/warmup/kernel_warmup.py +83 -0
- vllm/multimodal/__init__.py +35 -0
- vllm/multimodal/audio.py +116 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/cache.py +507 -0
- vllm/multimodal/hasher.py +110 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +979 -0
- vllm/multimodal/parse.py +496 -0
- vllm/multimodal/processing.py +1921 -0
- vllm/multimodal/profiling.py +313 -0
- vllm/multimodal/registry.py +375 -0
- vllm/multimodal/utils.py +754 -0
- vllm/multimodal/video.py +312 -0
- vllm/outputs.py +517 -0
- vllm/platforms/__init__.py +263 -0
- vllm/platforms/cpu.py +353 -0
- vllm/platforms/cuda.py +731 -0
- vllm/platforms/interface.py +599 -0
- vllm/platforms/rocm.py +504 -0
- vllm/platforms/tpu.py +236 -0
- vllm/platforms/xpu.py +243 -0
- vllm/plugins/__init__.py +72 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +67 -0
- vllm/plugins/lora_resolvers/README.md +16 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +183 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +22 -0
- vllm/ray/ray_env.py +72 -0
- vllm/reasoning/__init__.py +25 -0
- vllm/reasoning/abs_reasoning_parsers.py +202 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
- vllm/reasoning/gptoss_reasoning_parser.py +87 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
- vllm/reasoning/mistral_reasoning_parser.py +47 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/reasoning/step3_reasoning_parser.py +109 -0
- vllm/sampling_params.py +577 -0
- vllm/scalar_type.py +349 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1465 -0
- vllm/tasks.py +11 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +136 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +71 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1043 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +55 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/midashenglm.py +101 -0
- vllm/transformers_utils/configs/mistral.py +165 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +259 -0
- vllm/transformers_utils/configs/nemotron_vl.py +56 -0
- vllm/transformers_utils/configs/ovis.py +176 -0
- vllm/transformers_utils/configs/qwen3_next.py +275 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +32 -0
- vllm/transformers_utils/configs/speculators/base.py +91 -0
- vllm/transformers_utils/configs/step3_vl.py +123 -0
- vllm/transformers_utils/configs/ultravox.py +120 -0
- vllm/transformers_utils/detokenizer.py +169 -0
- vllm/transformers_utils/detokenizer_utils.py +199 -0
- vllm/transformers_utils/dynamic_module.py +60 -0
- vllm/transformers_utils/processor.py +245 -0
- vllm/transformers_utils/processors/__init__.py +16 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/processors/ovis2_5.py +458 -0
- vllm/transformers_utils/runai_utils.py +99 -0
- vllm/transformers_utils/s3_utils.py +90 -0
- vllm/transformers_utils/tokenizer.py +293 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +132 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +520 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +16 -0
- vllm/triton_utils/importing.py +95 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +259 -0
- vllm/utils/__init__.py +3438 -0
- vllm/utils/deep_gemm.py +212 -0
- vllm/utils/flashinfer.py +372 -0
- vllm/utils/jsontree.py +90 -0
- vllm/utils/tensor_schema.py +236 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +922 -0
- vllm/v1/attention/backends/flash_attn.py +800 -0
- vllm/v1/attention/backends/flashinfer.py +1128 -0
- vllm/v1/attention/backends/flex_attention.py +796 -0
- vllm/v1/attention/backends/gdn_attn.py +320 -0
- vllm/v1/attention/backends/linear_attn.py +68 -0
- vllm/v1/attention/backends/mamba1_attn.py +81 -0
- vllm/v1/attention/backends/mamba2_attn.py +224 -0
- vllm/v1/attention/backends/mamba_attn.py +52 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +1608 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
- vllm/v1/attention/backends/mla/flashmla.py +213 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
- vllm/v1/attention/backends/mla/triton_mla.py +175 -0
- vllm/v1/attention/backends/pallas.py +413 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
- vllm/v1/attention/backends/short_conv_attn.py +82 -0
- vllm/v1/attention/backends/tree_attn.py +450 -0
- vllm/v1/attention/backends/triton_attn.py +430 -0
- vllm/v1/attention/backends/utils.py +834 -0
- vllm/v1/attention/backends/xformers.py +437 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +330 -0
- vllm/v1/core/encoder_cache_manager.py +333 -0
- vllm/v1/core/kv_cache_coordinator.py +440 -0
- vllm/v1/core/kv_cache_manager.py +398 -0
- vllm/v1/core/kv_cache_utils.py +1169 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +47 -0
- vllm/v1/core/sched/interface.py +158 -0
- vllm/v1/core/sched/output.py +162 -0
- vllm/v1/core/sched/request_queue.py +224 -0
- vllm/v1/core/sched/scheduler.py +1287 -0
- vllm/v1/core/sched/utils.py +69 -0
- vllm/v1/core/single_type_kv_cache_manager.py +670 -0
- vllm/v1/cudagraph_dispatcher.py +121 -0
- vllm/v1/engine/__init__.py +202 -0
- vllm/v1/engine/async_llm.py +757 -0
- vllm/v1/engine/coordinator.py +357 -0
- vllm/v1/engine/core.py +1245 -0
- vllm/v1/engine/core_client.py +1333 -0
- vllm/v1/engine/detokenizer.py +300 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +332 -0
- vllm/v1/engine/logprobs.py +201 -0
- vllm/v1/engine/output_processor.py +558 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +524 -0
- vllm/v1/engine/utils.py +857 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +126 -0
- vllm/v1/executor/multiproc_executor.py +683 -0
- vllm/v1/executor/ray_distributed_executor.py +109 -0
- vllm/v1/kv_cache_interface.py +275 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +717 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +133 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +248 -0
- vllm/v1/outputs.py +147 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +77 -0
- vllm/v1/request.py +237 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +294 -0
- vllm/v1/sample/logits_processor/builtin.py +273 -0
- vllm/v1/sample/logits_processor/interface.py +97 -0
- vllm/v1/sample/logits_processor/state.py +161 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/logprobs.py +26 -0
- vllm/v1/sample/ops/penalties.py +43 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
- vllm/v1/sample/rejection_sampler.py +623 -0
- vllm/v1/sample/sampler.py +281 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +213 -0
- vllm/v1/serial_utils.py +395 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +740 -0
- vllm/v1/spec_decode/medusa.py +66 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +191 -0
- vllm/v1/spec_decode/ngram_proposer.py +157 -0
- vllm/v1/spec_decode/utils.py +14 -0
- vllm/v1/structured_output/__init__.py +297 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
- vllm/v1/structured_output/backend_outlines.py +320 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +323 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +373 -0
- vllm/v1/utils.py +382 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +221 -0
- vllm/v1/worker/cpu_model_runner.py +163 -0
- vllm/v1/worker/cpu_worker.py +183 -0
- vllm/v1/worker/gpu_input_batch.py +821 -0
- vllm/v1/worker/gpu_model_runner.py +3743 -0
- vllm/v1/worker/gpu_worker.py +697 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
- vllm/v1/worker/lora_model_runner_mixin.py +192 -0
- vllm/v1/worker/tpu_input_batch.py +585 -0
- vllm/v1/worker/tpu_model_runner.py +1947 -0
- vllm/v1/worker/tpu_worker.py +340 -0
- vllm/v1/worker/utils.py +290 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/v1/worker/xpu_model_runner.py +53 -0
- vllm/v1/worker/xpu_worker.py +179 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/enc_dec_model_runner.py +553 -0
- vllm/worker/model_runner.py +2016 -0
- vllm/worker/model_runner_base.py +307 -0
- vllm/worker/utils.py +49 -0
- vllm/worker/worker.py +670 -0
- vllm/worker/worker_base.py +651 -0
- vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
- vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
- vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
- vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
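The file-by-file diff begins below with the one module this page renders in full: judging from its `init_logger('vllm.entrypoints.openai.api_server')` call, it is vllm/entrypoints/openai/api_server.py, the OpenAI-compatible API server. The visible portion covers the module's imports, the FastAPI lifespan hook, and the build_async_engine_client / build_async_engine_client_from_engine_args context managers that construct an EngineClient. For orientation only, here is a minimal sketch of how those context managers might be driven from user code; it is an editor's assumption based on the signatures visible in the diff (the model id is a placeholder), not on documentation shipped with this wheel.

    # Editor's sketch, not part of the package. Assumes only the
    # signatures visible in the diff below; the model id is a placeholder.
    import asyncio

    from vllm.engine.arg_utils import AsyncEngineArgs
    from vllm.entrypoints.openai.api_server import (
        build_async_engine_client_from_engine_args)

    async def main() -> None:
        engine_args = AsyncEngineArgs(model="facebook/opt-125m")
        # The context manager yields an EngineClient and tears the engine
        # down again on exit.
        async with build_async_engine_client_from_engine_args(
                engine_args) as engine_client:
            # do_log_stats() is one of the EngineClient calls the server's
            # own lifespan hook makes; real callers would issue generation
            # requests here instead.
            await engine_client.do_log_stats()

    if __name__ == "__main__":
        asyncio.run(main())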
@@ -0,0 +1,2011 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+import asyncio
+import atexit
+import gc
+import importlib
+import inspect
+import json
+import multiprocessing
+import multiprocessing.forkserver as forkserver
+import os
+import signal
+import socket
+import tempfile
+import uuid
+from argparse import Namespace
+from collections.abc import AsyncIterator, Awaitable
+from contextlib import asynccontextmanager
+from functools import partial
+from http import HTTPStatus
+from typing import Annotated, Any, Callable, Optional
+
+import prometheus_client
+import pydantic
+import regex as re
+import uvloop
+from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Request
+from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import JSONResponse, Response, StreamingResponse
+from prometheus_client import make_asgi_app
+from prometheus_fastapi_instrumentator import Instrumentator
+from starlette.concurrency import iterate_in_threadpool
+from starlette.datastructures import URL, Headers, MutableHeaders, State
+from starlette.routing import Mount
+from starlette.types import ASGIApp, Message, Receive, Scope, Send
+from typing_extensions import assert_never
+
+import vllm.envs as envs
+from vllm.config import VllmConfig
+from vllm.engine.arg_utils import AsyncEngineArgs
+from vllm.engine.async_llm_engine import AsyncLLMEngine  # type: ignore
+from vllm.engine.multiprocessing.client import MQLLMEngineClient
+from vllm.engine.multiprocessing.engine import run_mp_engine
+from vllm.engine.protocol import EngineClient
+from vllm.entrypoints.chat_utils import (load_chat_template,
+                                         resolve_hf_chat_template,
+                                         resolve_mistral_chat_template)
+from vllm.entrypoints.launcher import serve_http
+from vllm.entrypoints.logger import RequestLogger
+from vllm.entrypoints.openai.cli_args import (make_arg_parser,
+                                              validate_parsed_serve_args)
+# yapf conflicts with isort for this block
+# yapf: disable
+from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
+                                              ChatCompletionResponse,
+                                              ClassificationRequest,
+                                              ClassificationResponse,
+                                              CompletionRequest,
+                                              CompletionResponse,
+                                              DetokenizeRequest,
+                                              DetokenizeResponse,
+                                              EmbeddingRequest,
+                                              EmbeddingResponse, ErrorInfo,
+                                              ErrorResponse,
+                                              IOProcessorResponse,
+                                              LoadLoRAAdapterRequest,
+                                              PoolingRequest, PoolingResponse,
+                                              RerankRequest, RerankResponse,
+                                              ResponsesRequest,
+                                              ResponsesResponse, ScoreRequest,
+                                              ScoreResponse, TokenizeRequest,
+                                              TokenizeResponse,
+                                              TranscriptionRequest,
+                                              TranscriptionResponse,
+                                              TranslationRequest,
+                                              TranslationResponse,
+                                              UnloadLoRAAdapterRequest)
+# yapf: enable
+from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
+from vllm.entrypoints.openai.serving_classification import (
+    ServingClassification)
+from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
+from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
+from vllm.entrypoints.openai.serving_engine import OpenAIServing
+from vllm.entrypoints.openai.serving_models import (BaseModelPath,
+                                                    LoRAModulePath,
+                                                    OpenAIServingModels)
+from vllm.entrypoints.openai.serving_pooling import OpenAIServingPooling
+from vllm.entrypoints.openai.serving_responses import OpenAIServingResponses
+from vllm.entrypoints.openai.serving_score import ServingScores
+from vllm.entrypoints.openai.serving_tokenization import (
+    OpenAIServingTokenization)
+from vllm.entrypoints.openai.serving_transcription import (
+    OpenAIServingTranscription, OpenAIServingTranslation)
+from vllm.entrypoints.openai.tool_parsers import ToolParserManager
+from vllm.entrypoints.tool_server import (DemoToolServer, MCPToolServer,
+                                          ToolServer)
+from vllm.entrypoints.utils import (cli_env_setup, load_aware_call,
+                                    log_non_default_args, with_cancellation)
+from vllm.logger import init_logger
+from vllm.reasoning import ReasoningParserManager
+from vllm.transformers_utils.config import (
+    maybe_register_config_serialize_by_value)
+from vllm.transformers_utils.tokenizer import MistralTokenizer
+from vllm.usage.usage_lib import UsageContext
+from vllm.utils import (Device, FlexibleArgumentParser, decorate_logs,
+                        get_open_zmq_ipc_path, is_valid_ipv6_address,
+                        set_ulimit)
+from vllm.v1.metrics.prometheus import get_prometheus_registry
+from vllm.version import __version__ as VLLM_VERSION
+
+prometheus_multiproc_dir: tempfile.TemporaryDirectory
+
+# Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765)
+logger = init_logger('vllm.entrypoints.openai.api_server')
+
+_running_tasks: set[asyncio.Task] = set()
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    try:
+        if app.state.log_stats:
+            engine_client: EngineClient = app.state.engine_client
+
+            async def _force_log():
+                while True:
+                    await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL)
+                    await engine_client.do_log_stats()
+
+            task = asyncio.create_task(_force_log())
+            _running_tasks.add(task)
+            task.add_done_callback(_running_tasks.remove)
+        else:
+            task = None
+
+        # Mark the startup heap as static so that it's ignored by GC.
+        # Reduces pause times of oldest generation collections.
+        gc.collect()
+        gc.freeze()
+        try:
+            yield
+        finally:
+            if task is not None:
+                task.cancel()
+    finally:
+        # Ensure app state including engine ref is gc'd
+        del app.state
+
+
|
|
153
|
+
@asynccontextmanager
async def build_async_engine_client(
    args: Namespace,
    *,
    usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
    disable_frontend_multiprocessing: Optional[bool] = None,
    client_config: Optional[dict[str, Any]] = None,
) -> AsyncIterator[EngineClient]:

    if os.getenv("VLLM_WORKER_MULTIPROC_METHOD") == "forkserver":
        # The executor is expected to be mp.
        # Pre-import heavy modules in the forkserver process.
        logger.debug("Setup forkserver with pre-imports")
        multiprocessing.set_start_method('forkserver')
        multiprocessing.set_forkserver_preload(["vllm.v1.engine.async_llm"])
        forkserver.ensure_running()
        logger.debug("Forkserver setup complete!")

    # Context manager to handle engine_client lifecycle.
    # Ensures everything is shutdown and cleaned up on error/exit.
    engine_args = AsyncEngineArgs.from_cli_args(args)

    if disable_frontend_multiprocessing is None:
        disable_frontend_multiprocessing = bool(
            args.disable_frontend_multiprocessing)

    async with build_async_engine_client_from_engine_args(
            engine_args,
            usage_context=usage_context,
            disable_frontend_multiprocessing=disable_frontend_multiprocessing,
            client_config=client_config,
    ) as engine:
        yield engine

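# A minimal usage sketch for the context manager above (illustrative only,
# never invoked at import time): `args` would come from the CLI parser this
# module builds elsewhere via FlexibleArgumentParser.
async def _example_build_engine_client(args: Namespace) -> None:
    async with build_async_engine_client(args) as engine:
        # The yielded object conforms to the EngineClient protocol.
        await engine.check_health()
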
@asynccontextmanager
async def build_async_engine_client_from_engine_args(
    engine_args: AsyncEngineArgs,
    *,
    usage_context: UsageContext = UsageContext.OPENAI_API_SERVER,
    disable_frontend_multiprocessing: bool = False,
    client_config: Optional[dict[str, Any]] = None,
) -> AsyncIterator[EngineClient]:
    """
    Create an EngineClient, either:

    - in-process, using the AsyncLLMEngine directly
    - multiprocess, using the AsyncLLMEngine RPC client

    Yields the client once it is ready to serve requests.
    """

    # Create the EngineConfig (determines if we can use V1).
    vllm_config = engine_args.create_engine_config(usage_context=usage_context)

    # V1 AsyncLLM.
    if envs.VLLM_USE_V1:
        if disable_frontend_multiprocessing:
            logger.warning(
                "V1 is enabled, but got --disable-frontend-multiprocessing. "
                "To disable frontend multiprocessing, set VLLM_USE_V1=0.")

        from vllm.v1.engine.async_llm import AsyncLLM
        async_llm: Optional[AsyncLLM] = None
        client_count = client_config.pop(
            "client_count") if client_config else 1
        client_index = client_config.pop(
            "client_index") if client_config else 0
        try:
            async_llm = AsyncLLM.from_vllm_config(
                vllm_config=vllm_config,
                usage_context=usage_context,
                enable_log_requests=engine_args.enable_log_requests,
                disable_log_stats=engine_args.disable_log_stats,
                client_addresses=client_config,
                client_count=client_count,
                client_index=client_index)

            # Don't keep the dummy data in memory
            await async_llm.reset_mm_cache()

            yield async_llm
        finally:
            if async_llm:
                async_llm.shutdown()

    # V0 AsyncLLM.
    elif (MQLLMEngineClient.is_unsupported_config(vllm_config)
          or disable_frontend_multiprocessing):

        engine_client: Optional[EngineClient] = None
        try:
            engine_client = AsyncLLMEngine.from_vllm_config(
                vllm_config=vllm_config,
                usage_context=usage_context,
                enable_log_requests=engine_args.enable_log_requests,
                disable_log_stats=engine_args.disable_log_stats)
            yield engine_client
        finally:
            if engine_client and hasattr(engine_client, "shutdown"):
                engine_client.shutdown()

    # V0 MQLLMEngine.
    else:
        if "PROMETHEUS_MULTIPROC_DIR" not in os.environ:
            # Make TemporaryDirectory for prometheus multiprocessing.
            # Note: global TemporaryDirectory will be automatically
            # cleaned up upon exit.
            global prometheus_multiproc_dir
            prometheus_multiproc_dir = tempfile.TemporaryDirectory()
            os.environ[
                "PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir.name
        else:
            logger.warning(
                "Found PROMETHEUS_MULTIPROC_DIR was set by user. "
                "This directory must be wiped between vLLM runs or "
                "you will find inaccurate metrics. Unset the variable "
                "and vLLM will properly handle cleanup.")

        # Select random path for IPC.
        ipc_path = get_open_zmq_ipc_path()
        logger.debug("Multiprocessing frontend to use %s for IPC Path.",
                     ipc_path)

        # Start RPCServer in separate process (holds the LLMEngine).
        # The current process might have CUDA context,
        # so we need to spawn a new process.
        context = multiprocessing.get_context("spawn")

        # Ensure we can serialize transformer config before spawning
        maybe_register_config_serialize_by_value()

        # The Process can raise an exception during startup, which may
        # not actually result in an exitcode being reported. As a result
        # we use a shared variable to communicate the information.
        engine_alive = multiprocessing.Value('b', True, lock=False)
        engine_process = context.Process(
            target=run_mp_engine,
            args=(vllm_config, UsageContext.OPENAI_API_SERVER, ipc_path,
                  engine_args.disable_log_stats,
                  engine_args.enable_log_requests, engine_alive))
        engine_process.start()
        engine_pid = engine_process.pid
        assert engine_pid is not None, "Engine process failed to start."
        logger.info("Started engine process with PID %d", engine_pid)

        def _cleanup_ipc_path():
            socket_path = ipc_path.replace("ipc://", "")
            if os.path.exists(socket_path):
                os.remove(socket_path)

        # Ensure we clean up the local IPC socket file on exit.
        atexit.register(_cleanup_ipc_path)

        # Build RPCClient, which conforms to EngineClient Protocol.
        build_client = partial(MQLLMEngineClient, ipc_path, vllm_config,
                               engine_pid)
        mq_engine_client = await asyncio.get_running_loop().run_in_executor(
            None, build_client)
        try:
            while True:
                try:
                    await mq_engine_client.setup()
                    break
                except TimeoutError:
                    if (not engine_process.is_alive()
                            or not engine_alive.value):
                        raise RuntimeError(
                            "Engine process failed to start. See stack "
                            "trace for the root cause.") from None

            yield mq_engine_client  # type: ignore[misc]
        finally:
            # Ensure rpc server process was terminated
            engine_process.terminate()

            # Close all open connections to the backend
            mq_engine_client.close()

            # Wait for engine process to join
            engine_process.join(4)
            if engine_process.exitcode is None:
                # Kill if still alive after the 4 second join timeout
                engine_process.kill()

            # Lazy import for prometheus multiprocessing.
            # We need to set PROMETHEUS_MULTIPROC_DIR environment variable
            # before prometheus_client is imported.
            # See https://prometheus.github.io/client_python/multiprocess/
            from prometheus_client import multiprocess
            multiprocess.mark_process_dead(engine_process.pid)

async def validate_json_request(raw_request: Request):
    content_type = raw_request.headers.get("content-type", "").lower()
    media_type = content_type.split(";", maxsplit=1)[0]
    if media_type != "application/json":
        raise RequestValidationError(errors=[
            "Unsupported Media Type: Only 'application/json' is allowed"
        ])


router = APIRouter()


class PrometheusResponse(Response):
    media_type = prometheus_client.CONTENT_TYPE_LATEST


def mount_metrics(app: FastAPI):
    """Mount prometheus metrics to a FastAPI app."""

    registry = get_prometheus_registry()

    # `response_class=PrometheusResponse` is needed to return an HTTP response
    # with header "Content-Type: text/plain; version=0.0.4; charset=utf-8"
    # instead of the default "application/json" which is incorrect.
    # See https://github.com/trallnag/prometheus-fastapi-instrumentator/issues/163#issue-1296092364
    Instrumentator(
        excluded_handlers=[
            "/metrics",
            "/health",
            "/load",
            "/ping",
            "/version",
            "/server_info",
        ],
        registry=registry,
    ).add().instrument(app).expose(app, response_class=PrometheusResponse)

    # Add prometheus asgi middleware to route /metrics requests
    metrics_route = Mount("/metrics", make_asgi_app(registry=registry))

    # Workaround for 307 Redirect for /metrics
    metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
    app.routes.append(metrics_route)

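# Illustrative sketch of scraping the endpoint mounted above (host, port and
# timeout are placeholders; requires the `requests` package):
def _example_scrape_metrics() -> None:
    import requests

    resp = requests.get("http://localhost:8000/metrics", timeout=5)
    # Prometheus text exposition format, one metric sample per line.
    print(resp.text[:200])
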
def base(request: Request) -> OpenAIServing:
    # Reuse the existing instance
    return tokenization(request)


def models(request: Request) -> OpenAIServingModels:
    return request.app.state.openai_serving_models


def responses(request: Request) -> Optional[OpenAIServingResponses]:
    return request.app.state.openai_serving_responses


def chat(request: Request) -> Optional[OpenAIServingChat]:
    return request.app.state.openai_serving_chat


def completion(request: Request) -> Optional[OpenAIServingCompletion]:
    return request.app.state.openai_serving_completion


def pooling(request: Request) -> Optional[OpenAIServingPooling]:
    return request.app.state.openai_serving_pooling


def embedding(request: Request) -> Optional[OpenAIServingEmbedding]:
    return request.app.state.openai_serving_embedding


def score(request: Request) -> Optional[ServingScores]:
    return request.app.state.openai_serving_scores


def classify(request: Request) -> Optional[ServingClassification]:
    return request.app.state.openai_serving_classification


def rerank(request: Request) -> Optional[ServingScores]:
    return request.app.state.openai_serving_scores


def tokenization(request: Request) -> OpenAIServingTokenization:
    return request.app.state.openai_serving_tokenization


def transcription(request: Request) -> OpenAIServingTranscription:
    return request.app.state.openai_serving_transcription


def translation(request: Request) -> OpenAIServingTranslation:
    return request.app.state.openai_serving_translation


def engine_client(request: Request) -> EngineClient:
    return request.app.state.engine_client

@router.get("/health", response_class=Response)
async def health(raw_request: Request) -> Response:
    """Health check."""
    await engine_client(raw_request).check_health()
    return Response(status_code=200)


@router.get("/load")
async def get_server_load_metrics(request: Request):
    # This endpoint returns the current server load metrics.
    # It tracks requests utilizing the GPU from the following routes:
    # - /v1/chat/completions
    # - /v1/completions
    # - /v1/audio/transcriptions
    # - /v1/audio/translations
    # - /v1/embeddings
    # - /pooling
    # - /classify
    # - /score
    # - /v1/score
    # - /rerank
    # - /v1/rerank
    # - /v2/rerank
    return JSONResponse(
        content={'server_load': request.app.state.server_load_metrics})


@router.get("/ping", response_class=Response)
@router.post("/ping", response_class=Response)
async def ping(raw_request: Request) -> Response:
    """Ping check. Endpoint required for SageMaker."""
    return await health(raw_request)

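# Illustrative sketch of the probe endpoints above (host and port are
# placeholders; requires the `requests` package):
def _example_probe_server() -> None:
    import requests

    # /health returns an empty 200 once the engine responds.
    assert requests.get("http://localhost:8000/health", timeout=5).ok
    # /load reports the number of in-flight GPU-bound requests.
    print(requests.get("http://localhost:8000/load", timeout=5).json())
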
@router.post("/tokenize",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_FOUND.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_IMPLEMENTED.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
async def tokenize(request: TokenizeRequest, raw_request: Request):
    handler = tokenization(raw_request)

    try:
        generator = await handler.create_tokenize(request, raw_request)
    except NotImplementedError as e:
        raise HTTPException(status_code=HTTPStatus.NOT_IMPLEMENTED.value,
                            detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, TokenizeResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/detokenize",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_FOUND.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
async def detokenize(request: DetokenizeRequest, raw_request: Request):
    handler = tokenization(raw_request)

    try:
        generator = await handler.create_detokenize(request, raw_request)
    except OverflowError as e:
        raise RequestValidationError(errors=[str(e)]) from e
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, DetokenizeResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


def maybe_register_tokenizer_info_endpoint(args):
    """Conditionally register the tokenizer info endpoint if enabled."""
    if getattr(args, 'enable_tokenizer_info_endpoint', False):

        @router.get("/tokenizer_info")
        async def get_tokenizer_info(raw_request: Request):
            """Get comprehensive tokenizer information."""
            result = await tokenization(raw_request).get_tokenizer_info()
            return JSONResponse(content=result.model_dump(),
                                status_code=result.error.code if isinstance(
                                    result, ErrorResponse) else 200)


@router.get("/v1/models")
async def show_available_models(raw_request: Request):
    handler = models(raw_request)

    models_ = await handler.show_available_models()
    return JSONResponse(content=models_.model_dump())


@router.get("/version")
async def show_version():
    ver = {"version": VLLM_VERSION}
    return JSONResponse(content=ver)

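# Illustrative round trip through the two endpoints above (the model name is
# a placeholder; requires the `requests` package):
def _example_tokenize_roundtrip() -> None:
    import requests

    base_url = "http://localhost:8000"
    tokens = requests.post(f"{base_url}/tokenize",
                           json={
                               "model": "my-model",
                               "prompt": "Hello, world!"
                           },
                           timeout=30).json()["tokens"]
    text = requests.post(f"{base_url}/detokenize",
                         json={
                             "model": "my-model",
                             "tokens": tokens
                         },
                         timeout=30).json()["prompt"]
    print(text)
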
@router.post("/v1/responses",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.OK.value: {
                     "content": {
                         "text/event-stream": {}
                     }
                 },
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_FOUND.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
async def create_responses(request: ResponsesRequest, raw_request: Request):
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API")
    try:
        generator = await handler.create_responses(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, ResponsesResponse):
        return JSONResponse(content=generator.model_dump())
    return StreamingResponse(content=generator, media_type="text/event-stream")


@router.get("/v1/responses/{response_id}")
async def retrieve_responses(
    response_id: str,
    raw_request: Request,
    starting_after: Optional[int] = None,
    stream: Optional[bool] = False,
):
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API")

    try:
        response = await handler.retrieve_responses(
            response_id,
            starting_after=starting_after,
            stream=stream,
        )
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(response, ErrorResponse):
        return JSONResponse(content=response.model_dump(),
                            status_code=response.error.code)
    elif stream:
        return StreamingResponse(content=response,
                                 media_type="text/event-stream")
    return JSONResponse(content=response.model_dump())


@router.post("/v1/responses/{response_id}/cancel")
async def cancel_responses(response_id: str, raw_request: Request):
    handler = responses(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Responses API")

    try:
        response = await handler.cancel_responses(response_id)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(response, ErrorResponse):
        return JSONResponse(content=response.model_dump(),
                            status_code=response.error.code)
    return JSONResponse(content=response.model_dump())

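# Illustrative lifecycle for the Responses endpoints above (model name and
# field shapes follow the OpenAI-compatible schema; host, port and values are
# placeholders; requires the `requests` package):
def _example_responses_lifecycle() -> None:
    import requests

    base_url = "http://localhost:8000"
    created = requests.post(f"{base_url}/v1/responses",
                            json={
                                "model": "my-model",
                                "input": "Write a haiku about GPUs."
                            },
                            timeout=60).json()
    response_id = created["id"]
    # Fetch the stored response, or cancel it while still in progress.
    requests.get(f"{base_url}/v1/responses/{response_id}", timeout=30)
    requests.post(f"{base_url}/v1/responses/{response_id}/cancel", timeout=30)
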
@router.post("/v1/chat/completions",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.OK.value: {
                     "content": {
                         "text/event-stream": {}
                     }
                 },
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_FOUND.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 }
             })
@with_cancellation
@load_aware_call
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    handler = chat(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Chat Completions API")
    try:
        generator = await handler.create_chat_completion(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)

    elif isinstance(generator, ChatCompletionResponse):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")


@router.post("/v1/completions",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.OK.value: {
                     "content": {
                         "text/event-stream": {}
                     }
                 },
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.NOT_FOUND.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_completion(request: CompletionRequest, raw_request: Request):
    handler = completion(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Completions API")

    try:
        generator = await handler.create_completion(request, raw_request)
    except OverflowError as e:
        raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
                            detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, CompletionResponse):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")

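# Illustrative client call for the two endpoints above, using the official
# `openai` package pointed at this server (base URL, API key and model name
# are placeholders):
def _example_chat_completion() -> None:
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    chat = client.chat.completions.create(
        model="my-model",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(chat.choices[0].message.content)
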
@router.post("/v1/embeddings",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_embedding(request: EmbeddingRequest, raw_request: Request):
    handler = embedding(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Embeddings API")

    try:
        generator = await handler.create_embedding(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, EmbeddingResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/pooling",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_pooling(request: PoolingRequest, raw_request: Request):
    handler = pooling(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Pooling API")
    try:
        generator = await handler.create_pooling(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, (PoolingResponse, IOProcessorResponse)):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/classify", dependencies=[Depends(validate_json_request)])
@with_cancellation
@load_aware_call
async def create_classify(request: ClassificationRequest,
                          raw_request: Request):
    handler = classify(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Classification API")

    try:
        generator = await handler.create_classify(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)

    elif isinstance(generator, ClassificationResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/score",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_score(request: ScoreRequest, raw_request: Request):
    handler = score(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Score API")

    try:
        generator = await handler.create_score(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, ScoreResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/v1/score",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_score_v1(request: ScoreRequest, raw_request: Request):
    logger.warning(
        "To indicate that Score API is not part of standard OpenAI API, we "
        "have moved it to `/score`. Please update your client accordingly.")

    return await create_score(request, raw_request)

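# Illustrative calls for the pooling-model endpoints above (model names are
# placeholders; requires the `requests` package):
def _example_embedding_and_score() -> None:
    import requests

    base_url = "http://localhost:8000"
    embedding = requests.post(f"{base_url}/v1/embeddings",
                              json={
                                  "model": "my-embedding-model",
                                  "input": ["The quick brown fox"]
                              },
                              timeout=30).json()
    score = requests.post(f"{base_url}/score",
                          json={
                              "model": "my-scoring-model",
                              "text_1": "What is vLLM?",
                              "text_2": "vLLM is an inference engine."
                          },
                          timeout=30).json()
    print(embedding, score)
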
@router.post("/v1/audio/transcriptions",
             responses={
                 HTTPStatus.OK.value: {
                     "content": {
                         "text/event-stream": {}
                     }
                 },
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.UNPROCESSABLE_ENTITY.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_transcriptions(raw_request: Request,
                                request: Annotated[TranscriptionRequest,
                                                   Form()]):
    handler = transcription(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Transcriptions API")

    audio_data = await request.file.read()
    try:
        generator = await handler.create_transcription(audio_data, request,
                                                       raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)

    elif isinstance(generator, TranscriptionResponse):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")


@router.post("/v1/audio/translations",
             responses={
                 HTTPStatus.OK.value: {
                     "content": {
                         "text/event-stream": {}
                     }
                 },
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.UNPROCESSABLE_ENTITY.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def create_translations(request: Annotated[TranslationRequest,
                                                 Form()],
                              raw_request: Request):
    handler = translation(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Translations API")

    audio_data = await request.file.read()
    try:
        generator = await handler.create_translation(audio_data, request,
                                                     raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)

    elif isinstance(generator, TranslationResponse):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")

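# Illustrative multipart upload for the audio endpoints above, matching the
# Form()-annotated request models (file path and model name are placeholders;
# requires the `requests` package):
def _example_transcription() -> None:
    import requests

    with open("sample.wav", "rb") as f:
        resp = requests.post("http://localhost:8000/v1/audio/transcriptions",
                             files={"file": f},
                             data={"model": "my-asr-model"},
                             timeout=120)
    print(resp.json())
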
@router.post("/rerank",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
@load_aware_call
async def do_rerank(request: RerankRequest, raw_request: Request):
    handler = rerank(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Rerank (Score) API")
    try:
        generator = await handler.do_rerank(request, raw_request)
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
                            detail=str(e)) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.error.code)
    elif isinstance(generator, RerankResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post("/v1/rerank",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
async def do_rerank_v1(request: RerankRequest, raw_request: Request):
    logger.warning_once(
        "To indicate that the rerank API is not part of the standard OpenAI"
        " API, we have located it at `/rerank`. Please update your client "
        "accordingly. (Note: Conforms to JinaAI rerank API)")

    return await do_rerank(request, raw_request)


@router.post("/v2/rerank",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
@with_cancellation
async def do_rerank_v2(request: RerankRequest, raw_request: Request):
    return await do_rerank(request, raw_request)

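# Illustrative request for the rerank endpoints above (JinaAI-style body;
# model name is a placeholder; requires the `requests` package):
def _example_rerank() -> None:
    import requests

    resp = requests.post("http://localhost:8000/rerank",
                         json={
                             "model": "my-reranker",
                             "query": "What is vLLM?",
                             "documents": [
                                 "vLLM is an inference engine.",
                                 "Bananas are yellow."
                             ]
                         },
                         timeout=30)
    print(resp.json())
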
if envs.VLLM_SERVER_DEV_MODE:
    logger.warning("SECURITY WARNING: Development endpoints are enabled! "
                   "This should NOT be used in production!")

    @router.get("/server_info")
    async def show_server_info(raw_request: Request):
        server_info = {"vllm_config": str(raw_request.app.state.vllm_config)}
        return JSONResponse(content=server_info)

    @router.post("/reset_prefix_cache")
    async def reset_prefix_cache(raw_request: Request):
        """
        Reset the prefix cache. Note that we currently do not check if the
        prefix cache is successfully reset in the API server.
        """
        device = None
        device_str = raw_request.query_params.get("device")
        if device_str is not None:
            device = Device[device_str.upper()]
        logger.info("Resetting prefix cache (device: %s)...", device)
        await engine_client(raw_request).reset_prefix_cache(device)
        return Response(status_code=200)

    @router.post("/sleep")
    async def sleep(raw_request: Request):
        # get POST params
        level = raw_request.query_params.get("level", "1")
        await engine_client(raw_request).sleep(int(level))
        # FIXME: in v0 with frontend multiprocessing, the sleep command
        # is sent but does not finish yet when we return a response.
        return Response(status_code=200)

    @router.post("/wake_up")
    async def wake_up(raw_request: Request):
        tags = raw_request.query_params.getlist("tags")
        if tags == []:
            # set to None to wake up all tags if no tags are provided
            tags = None
        logger.info("wake up the engine with tags: %s", tags)
        await engine_client(raw_request).wake_up(tags)
        # FIXME: in v0 with frontend multiprocessing, the wake-up command
        # is sent but does not finish yet when we return a response.
        return Response(status_code=200)

    @router.get("/is_sleeping")
    async def is_sleeping(raw_request: Request):
        logger.info("check whether the engine is sleeping")
        is_sleeping = await engine_client(raw_request).is_sleeping()
        return JSONResponse(content={"is_sleeping": is_sleeping})

    @router.post("/collective_rpc")
    async def collective_rpc(raw_request: Request):
        try:
            body = await raw_request.json()
        except json.JSONDecodeError as e:
            raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
                                detail=f"JSON decode error: {e}") from e
        method = body.get("method")
        if method is None:
            raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
                                detail="Missing 'method' in request body")
        # For security reason, only serialized string args/kwargs are passed.
        # User-defined `method` is responsible for deserialization if needed.
        args: list[str] = body.get("args", [])
        kwargs: dict[str, str] = body.get("kwargs", {})
        timeout: Optional[float] = body.get("timeout")
        results = await engine_client(raw_request).collective_rpc(
            method=method, timeout=timeout, args=tuple(args), kwargs=kwargs)
        if results is None:
            return Response(status_code=200)
        response: list[Any] = []
        for result in results:
            if result is None or isinstance(result, (dict, list)):
                response.append(result)
            else:
                response.append(str(result))
        return JSONResponse(content={"results": response})

    @router.post("/scale_elastic_ep",
                 dependencies=[Depends(validate_json_request)],
                 responses={
                     HTTPStatus.OK.value: {
                         "model": dict
                     },
                     HTTPStatus.BAD_REQUEST.value: {
                         "model": ErrorResponse
                     },
                     HTTPStatus.REQUEST_TIMEOUT.value: {
                         "model": ErrorResponse
                     },
                     HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                         "model": ErrorResponse
                     },
                 })
    async def scale_elastic_ep(raw_request: Request):
        try:
            body = await raw_request.json()
        except json.JSONDecodeError as e:
            raise HTTPException(status_code=400,
                                detail="Invalid JSON format") from e

        new_data_parallel_size = body.get("new_data_parallel_size")
        drain_timeout = body.get("drain_timeout", 120)  # Default 2 minutes

        if new_data_parallel_size is None:
            raise HTTPException(status_code=400,
                                detail="new_data_parallel_size is required")

        if not isinstance(new_data_parallel_size,
                          int) or new_data_parallel_size <= 0:
            raise HTTPException(
                status_code=400,
                detail="new_data_parallel_size must be a positive integer")

        if not isinstance(drain_timeout, int) or drain_timeout <= 0:
            raise HTTPException(
                status_code=400,
                detail="drain_timeout must be a positive integer")

        # Set scaling flag to prevent new requests
        global _scaling_elastic_ep
        _scaling_elastic_ep = True
        client = engine_client(raw_request)
        try:
            await client.scale_elastic_ep(new_data_parallel_size,
                                          drain_timeout)
            return JSONResponse({
                "message":
                f"Scaled to {new_data_parallel_size} "
                "data parallel engines",
            })
        except TimeoutError as e:
            raise HTTPException(
                status_code=408,
                detail="Scale failed due to request drain timeout "
                f"after {drain_timeout} seconds") from e
        except Exception as e:
            logger.error("Scale failed: %s", e)
            raise HTTPException(status_code=500, detail="Scale failed") from e
        finally:
            _scaling_elastic_ep = False

    @router.post("/is_scaling_elastic_ep")
    async def is_scaling_elastic_ep(raw_request: Request):
        return JSONResponse({"is_scaling_elastic_ep": _scaling_elastic_ep})

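# Illustrative use of the development endpoints above; they only exist when
# VLLM_SERVER_DEV_MODE is set (host, port and sleep level are placeholders;
# requires the `requests` package):
def _example_sleep_wake_cycle() -> None:
    import requests

    base_url = "http://localhost:8000"
    requests.post(f"{base_url}/sleep", params={"level": "1"}, timeout=60)
    status = requests.get(f"{base_url}/is_sleeping", timeout=5).json()
    assert status["is_sleeping"]
    requests.post(f"{base_url}/wake_up", timeout=60)
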
# TODO: RequestType = TypeForm[BaseModel] when recognized by type checkers
# (requires typing_extensions >= 4.13)
RequestType = Any
GetHandlerFn = Callable[[Request], Optional[OpenAIServing]]
EndpointFn = Callable[[RequestType, Request], Awaitable[Any]]

# NOTE: Items defined earlier take higher priority
INVOCATION_TYPES: list[tuple[RequestType, tuple[GetHandlerFn, EndpointFn]]] = [
    (ChatCompletionRequest, (chat, create_chat_completion)),
    (CompletionRequest, (completion, create_completion)),
    (EmbeddingRequest, (embedding, create_embedding)),
    (ClassificationRequest, (classify, create_classify)),
    (ScoreRequest, (score, create_score)),
    (RerankRequest, (rerank, do_rerank)),
    (PoolingRequest, (pooling, create_pooling)),
]

# NOTE: Construct the TypeAdapters only once
INVOCATION_VALIDATORS = [
    (pydantic.TypeAdapter(request_type), (get_handler, endpoint))
    for request_type, (get_handler, endpoint) in INVOCATION_TYPES
]


@router.post("/invocations",
             dependencies=[Depends(validate_json_request)],
             responses={
                 HTTPStatus.BAD_REQUEST.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value: {
                     "model": ErrorResponse
                 },
                 HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                     "model": ErrorResponse
                 },
             })
async def invocations(raw_request: Request):
    """For SageMaker, routes requests based on the request type."""
    try:
        body = await raw_request.json()
    except json.JSONDecodeError as e:
        raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
                            detail=f"JSON decode error: {e}") from e

    valid_endpoints = [(validator, endpoint)
                       for validator, (get_handler,
                                       endpoint) in INVOCATION_VALIDATORS
                       if get_handler(raw_request) is not None]

    for request_validator, endpoint in valid_endpoints:
        try:
            request = request_validator.validate_python(body)
        except pydantic.ValidationError:
            continue

        return await endpoint(request, raw_request)

    type_names = [
        t.__name__ if isinstance(t := validator._type, type) else str(t)
        for validator, _ in valid_endpoints
    ]
    msg = ("Cannot find suitable handler for request. "
           f"Expected one of: {type_names}")
    res = base(raw_request).create_error_response(message=msg)
    return JSONResponse(content=res.model_dump(), status_code=res.error.code)

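# Illustrative /invocations call: the body shape decides the handler, so a
# prompt-style body is dispatched to create_completion, while a messages-style
# body would go to create_chat_completion (model name is a placeholder;
# requires the `requests` package):
def _example_invocations_dispatch() -> None:
    import requests

    resp = requests.post("http://localhost:8000/invocations",
                         json={
                             "model": "my-model",
                             "prompt": "Hello"
                         },
                         timeout=60)
    print(resp.json())
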
if envs.VLLM_TORCH_PROFILER_DIR:
    logger.warning(
        "Torch Profiler is enabled in the API server. This should ONLY be "
        "used for local development!")

    @router.post("/start_profile")
    async def start_profile(raw_request: Request):
        logger.info("Starting profiler...")
        await engine_client(raw_request).start_profile()
        logger.info("Profiler started.")
        return Response(status_code=200)

    @router.post("/stop_profile")
    async def stop_profile(raw_request: Request):
        logger.info("Stopping profiler...")
        await engine_client(raw_request).stop_profile()
        logger.info("Profiler stopped.")
        return Response(status_code=200)


if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
    logger.warning(
        "LoRA dynamic loading & unloading is enabled in the API server. "
        "This should ONLY be used for local development!")

    @router.post("/v1/load_lora_adapter",
                 dependencies=[Depends(validate_json_request)])
    async def load_lora_adapter(request: LoadLoRAAdapterRequest,
                                raw_request: Request):
        handler = models(raw_request)
        response = await handler.load_lora_adapter(request)
        if isinstance(response, ErrorResponse):
            return JSONResponse(content=response.model_dump(),
                                status_code=response.error.code)

        return Response(status_code=200, content=response)

    @router.post("/v1/unload_lora_adapter",
                 dependencies=[Depends(validate_json_request)])
    async def unload_lora_adapter(request: UnloadLoRAAdapterRequest,
                                  raw_request: Request):
        handler = models(raw_request)
        response = await handler.unload_lora_adapter(request)
        if isinstance(response, ErrorResponse):
            return JSONResponse(content=response.model_dump(),
                                status_code=response.error.code)

        return Response(status_code=200, content=response)


def load_log_config(log_config_file: Optional[str]) -> Optional[dict]:
    if not log_config_file:
        return None
    try:
        with open(log_config_file) as f:
            return json.load(f)
    except Exception as e:
        logger.warning("Failed to load log config from file %s: error %s",
                       log_config_file, e)
        return None

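# Illustrative dynamic-LoRA call; the endpoint above only exists when
# VLLM_ALLOW_RUNTIME_LORA_UPDATING is set (adapter name and path are
# placeholders; requires the `requests` package):
def _example_load_lora_adapter() -> None:
    import requests

    requests.post("http://localhost:8000/v1/load_lora_adapter",
                  json={
                      "lora_name": "my-adapter",
                      "lora_path": "/path/to/adapter"
                  },
                  timeout=60)
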
class AuthenticationMiddleware:
    """
    Pure ASGI middleware that authenticates each request by checking
    if the Authorization header exists and equals "Bearer {api_key}".

    Notes
    -----
    There are two cases in which authentication is skipped:
        1. The HTTP method is OPTIONS.
        2. The request path doesn't start with /v1 (e.g. /health).
    """

    def __init__(self, app: ASGIApp, tokens: list[str]) -> None:
        self.app = app
        self.api_tokens = {f"Bearer {token}" for token in tokens}

    def __call__(self, scope: Scope, receive: Receive,
                 send: Send) -> Awaitable[None]:
        # scope["type"] can be "lifespan" or "startup" for example, in
        # which case we don't need to do anything. scope.get is used
        # because websocket scopes carry no "method" key.
        if scope["type"] not in ("http", "websocket") or scope.get(
                "method") == "OPTIONS":
            return self.app(scope, receive, send)
        root_path = scope.get("root_path", "")
        url_path = URL(scope=scope).path.removeprefix(root_path)
        headers = Headers(scope=scope)
        # Type narrow to satisfy mypy.
        if url_path.startswith("/v1") and headers.get(
                "Authorization") not in self.api_tokens:
            response = JSONResponse(content={"error": "Unauthorized"},
                                    status_code=401)
            return response(scope, receive, send)
        return self.app(scope, receive, send)


class XRequestIdMiddleware:
    """
    Middleware that sets the X-Request-Id header for each response
    to a random uuid4 (hex) value if the header isn't already
    present in the request, otherwise uses the provided request id.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive,
                 send: Send) -> Awaitable[None]:
        if scope["type"] not in ("http", "websocket"):
            return self.app(scope, receive, send)

        # Extract the request headers.
        request_headers = Headers(scope=scope)

        async def send_with_request_id(message: Message) -> None:
            """
            Custom send function to mutate the response headers
            and append X-Request-Id to it.
            """
            if message["type"] == "http.response.start":
                response_headers = MutableHeaders(raw=message["headers"])
                request_id = request_headers.get("X-Request-Id",
                                                 uuid.uuid4().hex)
                response_headers.append("X-Request-Id", request_id)
            await send(message)

        return self.app(scope, receive, send_with_request_id)


# Global variable to track scaling state
_scaling_elastic_ep = False


class ScalingMiddleware:
    """
    Middleware that checks if the model is currently scaling and
    returns a 503 Service Unavailable response if it is.

    This middleware applies to all HTTP requests and prevents
    processing when the model is in a scaling state.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive,
                 send: Send) -> Awaitable[None]:
        if scope["type"] != "http":
            return self.app(scope, receive, send)

        # Check global scaling state
        global _scaling_elastic_ep
        if _scaling_elastic_ep:
            # Return 503 Service Unavailable response
            response = JSONResponse(content={
                "error":
                "The model is currently scaling. Please try again later."
            },
                                    status_code=503)
            return response(scope, receive, send)

        return self.app(scope, receive, send)

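# Illustrative wiring of the middlewares above onto an app; build_app() below
# attaches them based on CLI flags, this just shows the raw Starlette calls
# (the token value is a placeholder):
def _example_attach_middlewares(app: FastAPI) -> None:
    app.add_middleware(XRequestIdMiddleware)
    app.add_middleware(AuthenticationMiddleware, tokens=["s3cret-token"])
    app.add_middleware(ScalingMiddleware)
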
def _extract_content_from_chunk(chunk_data: dict) -> str:
|
|
1430
|
+
"""Extract content from a streaming response chunk."""
|
|
1431
|
+
try:
|
|
1432
|
+
from vllm.entrypoints.openai.protocol import (
|
|
1433
|
+
ChatCompletionStreamResponse, CompletionStreamResponse)
|
|
1434
|
+
|
|
1435
|
+
# Try using Completion types for type-safe parsing
|
|
1436
|
+
if chunk_data.get('object') == 'chat.completion.chunk':
|
|
1437
|
+
chat_response = ChatCompletionStreamResponse.model_validate(
|
|
1438
|
+
chunk_data)
|
|
1439
|
+
if chat_response.choices and chat_response.choices[0].delta.content:
|
|
1440
|
+
return chat_response.choices[0].delta.content
|
|
1441
|
+
elif chunk_data.get('object') == 'text_completion':
|
|
1442
|
+
completion_response = CompletionStreamResponse.model_validate(
|
|
1443
|
+
chunk_data)
|
|
1444
|
+
if completion_response.choices and completion_response.choices[
|
|
1445
|
+
0].text:
|
|
1446
|
+
return completion_response.choices[0].text
|
|
1447
|
+
except pydantic.ValidationError:
|
|
1448
|
+
# Fallback to manual parsing
|
|
1449
|
+
if 'choices' in chunk_data and chunk_data['choices']:
|
|
1450
|
+
choice = chunk_data['choices'][0]
|
|
1451
|
+
if 'delta' in choice and choice['delta'].get('content'):
|
|
1452
|
+
return choice['delta']['content']
|
|
1453
|
+
elif choice.get('text'):
|
|
1454
|
+
return choice['text']
|
|
1455
|
+
return ""
|
|
1456
|
+
|
|
1457
|
+
|
|
1458
|
+
class SSEDecoder:
|
|
1459
|
+
"""Robust Server-Sent Events decoder for streaming responses."""
|
|
1460
|
+
|
|
1461
|
+
def __init__(self):
|
|
1462
|
+
self.buffer = ""
|
|
1463
|
+
self.content_buffer = []
|
|
1464
|
+
|
|
1465
|
+
def decode_chunk(self, chunk: bytes) -> list[dict]:
|
|
1466
|
+
"""Decode a chunk of SSE data and return parsed events."""
|
|
1467
|
+
import json
|
|
1468
|
+
|
|
1469
|
+
try:
|
|
1470
|
+
chunk_str = chunk.decode('utf-8')
|
|
1471
|
+
except UnicodeDecodeError:
|
|
1472
|
+
# Skip malformed chunks
|
|
1473
|
+
return []
|
|
1474
|
+
|
|
1475
|
+
self.buffer += chunk_str
|
|
1476
|
+
events = []
|
|
1477
|
+
|
|
1478
|
+
# Process complete lines
|
|
1479
|
+
while '\n' in self.buffer:
|
|
1480
|
+
line, self.buffer = self.buffer.split('\n', 1)
|
|
1481
|
+
line = line.rstrip('\r') # Handle CRLF
|
|
1482
|
+
|
|
1483
|
+
if line.startswith('data: '):
|
|
1484
|
+
data_str = line[6:].strip()
|
|
1485
|
+
if data_str == '[DONE]':
|
|
1486
|
+
events.append({'type': 'done'})
|
|
1487
|
+
elif data_str:
|
|
1488
|
+
try:
|
|
1489
|
+
event_data = json.loads(data_str)
|
|
1490
|
+
events.append({'type': 'data', 'data': event_data})
|
|
1491
|
+
except json.JSONDecodeError:
|
|
1492
|
+
# Skip malformed JSON
|
|
1493
|
+
continue
|
|
1494
|
+
|
|
1495
|
+
return events
|
|
1496
|
+
|
|
1497
|
+
def extract_content(self, event_data: dict) -> str:
|
|
1498
|
+
"""Extract content from event data."""
|
|
1499
|
+
return _extract_content_from_chunk(event_data)
|
|
1500
|
+
|
|
1501
|
+
def add_content(self, content: str) -> None:
|
|
1502
|
+
"""Add content to the buffer."""
|
|
1503
|
+
if content:
|
|
1504
|
+
self.content_buffer.append(content)
|
|
1505
|
+
|
|
1506
|
+
def get_complete_content(self) -> str:
|
|
1507
|
+
"""Get the complete buffered content."""
|
|
1508
|
+
return ''.join(self.content_buffer)
|
|
1509
|
+
|
|
1510
|
+
|
|
1511
|
+
+def _log_streaming_response(response, response_body: list) -> None:
+    """Log streaming response with robust SSE parsing."""
+    from starlette.concurrency import iterate_in_threadpool
+
+    sse_decoder = SSEDecoder()
+    chunk_count = 0
+
+    def buffered_iterator():
+        nonlocal chunk_count
+
+        for chunk in response_body:
+            chunk_count += 1
+            yield chunk
+
+            # Parse SSE events from chunk
+            events = sse_decoder.decode_chunk(chunk)
+
+            for event in events:
+                if event['type'] == 'data':
+                    content = sse_decoder.extract_content(event['data'])
+                    sse_decoder.add_content(content)
+                elif event['type'] == 'done':
+                    # Log complete content when done
+                    full_content = sse_decoder.get_complete_content()
+                    if full_content:
+                        # Truncate if too long
+                        if len(full_content) > 2048:
+                            full_content = full_content[:2048] + \
+                                "...[truncated]"
+                        logger.info(
+                            "response_body={streaming_complete: " \
+                            "content='%s', chunks=%d}",
+                            full_content, chunk_count)
+                    else:
+                        logger.info(
+                            "response_body={streaming_complete: " \
+                            "no_content, chunks=%d}",
+                            chunk_count)
+                    return
+
+    response.body_iterator = iterate_in_threadpool(buffered_iterator())
+    logger.info("response_body={streaming_started: chunks=%d}",
+                len(response_body))
+
+
+def _log_non_streaming_response(response_body: list) -> None:
+    """Log non-streaming response."""
+    try:
+        decoded_body = response_body[0].decode()
+        logger.info("response_body={%s}", decoded_body)
+    except UnicodeDecodeError:
+        logger.info("response_body={<binary_data>}")
+
+
+def build_app(args: Namespace) -> FastAPI:
+    if args.disable_fastapi_docs:
+        app = FastAPI(openapi_url=None,
+                      docs_url=None,
+                      redoc_url=None,
+                      lifespan=lifespan)
+    else:
+        app = FastAPI(lifespan=lifespan)
+    app.include_router(router)
+    app.root_path = args.root_path
+
+    mount_metrics(app)
+
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=args.allowed_origins,
+        allow_credentials=args.allow_credentials,
+        allow_methods=args.allowed_methods,
+        allow_headers=args.allowed_headers,
+    )
+
+    @app.exception_handler(HTTPException)
+    async def http_exception_handler(_: Request, exc: HTTPException):
+        err = ErrorResponse(
+            error=ErrorInfo(message=exc.detail,
+                            type=HTTPStatus(exc.status_code).phrase,
+                            code=exc.status_code))
+        return JSONResponse(err.model_dump(), status_code=exc.status_code)
+
+    @app.exception_handler(RequestValidationError)
+    async def validation_exception_handler(_: Request,
+                                           exc: RequestValidationError):
+        exc_str = str(exc)
+        errors_str = str(exc.errors())
+
+        if exc.errors() and errors_str and errors_str != exc_str:
+            message = f"{exc_str} {errors_str}"
+        else:
+            message = exc_str
+
+        err = ErrorResponse(error=ErrorInfo(message=message,
+                                            type=HTTPStatus.BAD_REQUEST.phrase,
+                                            code=HTTPStatus.BAD_REQUEST))
+        return JSONResponse(err.model_dump(),
+                            status_code=HTTPStatus.BAD_REQUEST)
+
+    # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY
+    if tokens := [key for key in (args.api_key or [envs.VLLM_API_KEY]) if key]:
+        app.add_middleware(AuthenticationMiddleware, tokens=tokens)
+
+    if args.enable_request_id_headers:
+        app.add_middleware(XRequestIdMiddleware)
+
+    # Add scaling middleware to check for scaling state
+    app.add_middleware(ScalingMiddleware)
+
+    if envs.VLLM_DEBUG_LOG_API_SERVER_RESPONSE:
+        logger.warning("CAUTION: Enabling log response in the API Server. "
+                       "This can include sensitive information and should be "
+                       "avoided in production.")
+
+        @app.middleware("http")
+        async def log_response(request: Request, call_next):
+            response = await call_next(request)
+            response_body = [
+                section async for section in response.body_iterator
+            ]
+            response.body_iterator = iterate_in_threadpool(iter(response_body))
+            # Check if this is a streaming response by looking at content-type
+            content_type = response.headers.get("content-type", "")
+            is_streaming = content_type == "text/event-stream; charset=utf-8"
+
+            # Log response body based on type
+            if not response_body:
+                logger.info("response_body={<empty>}")
+            elif is_streaming:
+                _log_streaming_response(response, response_body)
+            else:
+                _log_non_streaming_response(response_body)
+            return response
+
+    for middleware in args.middleware:
+        module_path, object_name = middleware.rsplit(".", 1)
+        imported = getattr(importlib.import_module(module_path), object_name)
+        if inspect.isclass(imported):
+            app.add_middleware(imported)  # type: ignore[arg-type]
+        elif inspect.iscoroutinefunction(imported):
+            app.middleware("http")(imported)
+        else:
+            raise ValueError(f"Invalid middleware {middleware}. "
+                             f"Must be a function or a class.")
+
+    return app
+
+
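build_app resolves each --middleware entry by dotted path: classes go through app.add_middleware, coroutine functions are registered as HTTP middleware. A minimal sketch of a loadable entry (the module and function names are hypothetical):

    # my_middleware.py -- hypothetical; register with:
    #   vllm serve <model> --middleware my_middleware.add_header
    from fastapi import Request


    async def add_header(request: Request, call_next):
        # Coroutine functions land in the app.middleware("http") branch above.
        response = await call_next(request)
        response.headers["X-Example"] = "demo"
        return response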
+async def init_app_state(
+    engine_client: EngineClient,
+    vllm_config: VllmConfig,
+    state: State,
+    args: Namespace,
+) -> None:
+    if args.served_model_name is not None:
+        served_model_names = args.served_model_name
+    else:
+        served_model_names = [args.model]
+
+    if args.enable_log_requests:
+        request_logger = RequestLogger(max_log_len=args.max_log_len)
+    else:
+        request_logger = None
+
+    base_model_paths = [
+        BaseModelPath(name=name, model_path=args.model)
+        for name in served_model_names
+    ]
+
+    state.engine_client = engine_client
+    state.log_stats = not args.disable_log_stats
+    state.vllm_config = vllm_config
+    model_config = vllm_config.model_config
+
+    if envs.VLLM_USE_V1:
+        supported_tasks = await engine_client \
+            .get_supported_tasks()  # type: ignore
+    else:
+        supported_tasks = model_config.supported_tasks
+
+    logger.info("Supported_tasks: %s", supported_tasks)
+
+    resolved_chat_template = load_chat_template(args.chat_template)
+    if resolved_chat_template is not None:
+        # Get the tokenizer to check official template
+        tokenizer = await engine_client.get_tokenizer()
+
+        if isinstance(tokenizer, MistralTokenizer):
+            # The warning is logged in resolve_mistral_chat_template.
+            resolved_chat_template = resolve_mistral_chat_template(
+                chat_template=resolved_chat_template)
+        else:
+            hf_chat_template = resolve_hf_chat_template(
+                tokenizer=tokenizer,
+                chat_template=None,
+                tools=None,
+                model_config=vllm_config.model_config,
+            )
+
+            if hf_chat_template != resolved_chat_template:
+                logger.warning(
+                    "Using supplied chat template: %s\n"
+                    "It is different from official chat template '%s'. "
+                    "This discrepancy may lead to performance degradation.",
+                    resolved_chat_template, args.model)
+
+    if args.tool_server == "demo":
+        tool_server: Optional[ToolServer] = DemoToolServer()
+        assert isinstance(tool_server, DemoToolServer)
+        await tool_server.init_and_validate()
+    elif args.tool_server:
+        tool_server = MCPToolServer()
+        await tool_server.add_tool_server(args.tool_server)
+    else:
+        tool_server = None
+
+    # Merge default_mm_loras into the static lora_modules
+    default_mm_loras = (vllm_config.lora_config.default_mm_loras
+                        if vllm_config.lora_config is not None else {})
+
+    lora_modules = args.lora_modules
+    if default_mm_loras:
+        default_mm_lora_paths = [
+            LoRAModulePath(
+                name=modality,
+                path=lora_path,
+            ) for modality, lora_path in default_mm_loras.items()
+        ]
+        if args.lora_modules is None:
+            lora_modules = default_mm_lora_paths
+        else:
+            lora_modules += default_mm_lora_paths
+
+    state.openai_serving_models = OpenAIServingModels(
+        engine_client=engine_client,
+        model_config=model_config,
+        base_model_paths=base_model_paths,
+        lora_modules=lora_modules,
+    )
+    await state.openai_serving_models.init_static_loras()
+    state.openai_serving_responses = OpenAIServingResponses(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        chat_template=resolved_chat_template,
+        chat_template_content_format=args.chat_template_content_format,
+        return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+        enable_auto_tools=args.enable_auto_tool_choice,
+        tool_parser=args.tool_call_parser,
+        tool_server=tool_server,
+        reasoning_parser=args.reasoning_parser,
+        enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+        enable_force_include_usage=args.enable_force_include_usage,
+        enable_log_outputs=args.enable_log_outputs,
+        log_error_stack=args.log_error_stack,
+    ) if "generate" in supported_tasks else None
+    state.openai_serving_chat = OpenAIServingChat(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        args.response_role,
+        request_logger=request_logger,
+        chat_template=resolved_chat_template,
+        chat_template_content_format=args.chat_template_content_format,
+        return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+        enable_auto_tools=args.enable_auto_tool_choice,
+        exclude_tools_when_tool_choice_none=args.
+        exclude_tools_when_tool_choice_none,
+        tool_parser=args.tool_call_parser,
+        reasoning_parser=args.reasoning_parser,
+        enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+        enable_force_include_usage=args.enable_force_include_usage,
+        enable_log_outputs=args.enable_log_outputs,
+        log_error_stack=args.log_error_stack,
+    ) if "generate" in supported_tasks else None
+    state.openai_serving_completion = OpenAIServingCompletion(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+        enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+        enable_force_include_usage=args.enable_force_include_usage,
+        log_error_stack=args.log_error_stack,
+    ) if "generate" in supported_tasks else None
+    state.openai_serving_pooling = OpenAIServingPooling(
+        engine_client,
+        vllm_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        chat_template=resolved_chat_template,
+        chat_template_content_format=args.chat_template_content_format,
+        log_error_stack=args.log_error_stack,
+    ) if "encode" in supported_tasks else None
+    state.openai_serving_embedding = OpenAIServingEmbedding(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        chat_template=resolved_chat_template,
+        chat_template_content_format=args.chat_template_content_format,
+        log_error_stack=args.log_error_stack,
+    ) if "embed" in supported_tasks else None
+    state.openai_serving_classification = ServingClassification(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        log_error_stack=args.log_error_stack,
+    ) if "classify" in supported_tasks else None
+    state.openai_serving_scores = ServingScores(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        log_error_stack=args.log_error_stack,
+    ) if ("embed" in supported_tasks or "score" in supported_tasks) else None
+    state.openai_serving_tokenization = OpenAIServingTokenization(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        chat_template=resolved_chat_template,
+        chat_template_content_format=args.chat_template_content_format,
+        log_error_stack=args.log_error_stack,
+    )
+    state.openai_serving_transcription = OpenAIServingTranscription(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        log_error_stack=args.log_error_stack,
+    ) if "transcription" in supported_tasks else None
+    state.openai_serving_translation = OpenAIServingTranslation(
+        engine_client,
+        model_config,
+        state.openai_serving_models,
+        request_logger=request_logger,
+        log_error_stack=args.log_error_stack,
+    ) if "transcription" in supported_tasks else None
+
+    state.enable_server_load_tracking = args.enable_server_load_tracking
+    state.server_load_metrics = 0
+
+
+def create_server_socket(addr: tuple[str, int]) -> socket.socket:
+    family = socket.AF_INET
+    if is_valid_ipv6_address(addr[0]):
+        family = socket.AF_INET6
+
+    sock = socket.socket(family=family, type=socket.SOCK_STREAM)
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+    sock.bind(addr)
+
+    return sock
+
+
+def create_server_unix_socket(path: str) -> socket.socket:
+    sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+    sock.bind(path)
+    return sock
+
+
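A standalone sketch exercising the socket helper above (assumes the installed wheel and a Linux host, since SO_REUSEPORT is platform-dependent; the ports are arbitrary). The IPv6 branch is selected by is_valid_ipv6_address on the host part.

    # Illustration only: module-level helper, arbitrary ports.
    from vllm.entrypoints.openai.api_server import create_server_socket

    sock4 = create_server_socket(("127.0.0.1", 18000))  # AF_INET branch
    sock6 = create_server_socket(("::1", 18001))        # AF_INET6 branch
    print(sock4.family, sock6.family)
    sock4.close()
    sock6.close()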
+def validate_api_server_args(args):
+    valid_tool_parses = ToolParserManager.tool_parsers.keys()
+    if args.enable_auto_tool_choice \
+        and args.tool_call_parser not in valid_tool_parses:
+        raise KeyError(f"invalid tool call parser: {args.tool_call_parser} "
+                       f"(choose from {{ {','.join(valid_tool_parses)} }})")
+
+    valid_reasoning_parses = ReasoningParserManager.reasoning_parsers.keys()
+    if args.reasoning_parser \
+        and args.reasoning_parser not in valid_reasoning_parses:
+        raise KeyError(
+            f"invalid reasoning parser: {args.reasoning_parser} "
+            f"(choose from {{ {','.join(valid_reasoning_parses)} }})")
+
+
+def setup_server(args):
+    """Validate API server args, set up signal handler, create socket
+    ready to serve."""
+
+    logger.info("vLLM API server version %s", VLLM_VERSION)
+    log_non_default_args(args)
+
+    if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
+        ToolParserManager.import_tool_parser(args.tool_parser_plugin)
+
+    validate_api_server_args(args)
+
+    # workaround to make sure that we bind the port before the engine is set up.
+    # This avoids race conditions with ray.
+    # see https://github.com/vllm-project/vllm/issues/8204
+    if args.uds:
+        sock = create_server_unix_socket(args.uds)
+    else:
+        sock_addr = (args.host or "", args.port)
+        sock = create_server_socket(sock_addr)
+
+    # workaround to avoid footguns where uvicorn drops requests with too
+    # many concurrent requests active
+    set_ulimit()
+
+    def signal_handler(*_) -> None:
+        # Interrupt server on sigterm while initializing
+        raise KeyboardInterrupt("terminated")
+
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    if args.uds:
+        listen_address = f"unix:{args.uds}"
+    else:
+        addr, port = sock_addr
+        is_ssl = args.ssl_keyfile and args.ssl_certfile
+        host_part = f"[{addr}]" if is_valid_ipv6_address(
+            addr) else addr or "0.0.0.0"
+        listen_address = f"http{'s' if is_ssl else ''}://{host_part}:{port}"
+    return listen_address, sock
+
+
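For reference, the listen_address strings the tail of setup_server produces, under assumed argument values:

    # Illustrative mapping (all values assumed):
    #   --uds /tmp/vllm.sock                 -> "unix:/tmp/vllm.sock"
    #   --host 127.0.0.1 --port 8000         -> "http://127.0.0.1:8000"
    #   --host :: --port 8000 + SSL files    -> "https://[::]:8000"
    #   no --host, --port 8000               -> "http://0.0.0.0:8000"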
+async def run_server(args, **uvicorn_kwargs) -> None:
+    """Run a single-worker API server."""
+
+    # Add process-specific prefix to stdout and stderr.
+    decorate_logs("APIServer")
+
+    listen_address, sock = setup_server(args)
+    await run_server_worker(listen_address, sock, args, **uvicorn_kwargs)
+
+
+async def run_server_worker(listen_address,
+                            sock,
+                            args,
+                            client_config=None,
+                            **uvicorn_kwargs) -> None:
+    """Run a single API server worker."""
+
+    if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
+        ToolParserManager.import_tool_parser(args.tool_parser_plugin)
+
+    server_index = client_config.get("client_index", 0) if client_config else 0
+
+    # Load logging config for uvicorn if specified
+    log_config = load_log_config(args.log_config_file)
+    if log_config is not None:
+        uvicorn_kwargs['log_config'] = log_config
+
+    async with build_async_engine_client(
+            args,
+            client_config=client_config,
+    ) as engine_client:
+        maybe_register_tokenizer_info_endpoint(args)
+        app = build_app(args)
+
+        vllm_config = await engine_client.get_vllm_config()
+        await init_app_state(engine_client, vllm_config, app.state, args)
+
+        logger.info("Starting vLLM API server %d on %s", server_index,
+                    listen_address)
+        shutdown_task = await serve_http(
+            app,
+            sock=sock,
+            enable_ssl_refresh=args.enable_ssl_refresh,
+            host=args.host,
+            port=args.port,
+            log_level=args.uvicorn_log_level,
+            # NOTE: When the 'disable_uvicorn_access_log' value is True,
+            # no access log will be output.
+            access_log=not args.disable_uvicorn_access_log,
+            timeout_keep_alive=envs.VLLM_HTTP_TIMEOUT_KEEP_ALIVE,
+            ssl_keyfile=args.ssl_keyfile,
+            ssl_certfile=args.ssl_certfile,
+            ssl_ca_certs=args.ssl_ca_certs,
+            ssl_cert_reqs=args.ssl_cert_reqs,
+            h11_max_incomplete_event_size=args.h11_max_incomplete_event_size,
+            h11_max_header_count=args.h11_max_header_count,
+            **uvicorn_kwargs,
+        )
+
+    # NB: Await server shutdown only after the backend context is exited
+    try:
+        await shutdown_task
+    finally:
+        sock.close()
+
+
+if __name__ == "__main__":
+    # NOTE(simon):
+    # This section should be in sync with vllm/entrypoints/cli/main.py for CLI
+    # entrypoints.
+    cli_env_setup()
+    parser = FlexibleArgumentParser(
+        description="vLLM OpenAI-Compatible RESTful API server.")
+    parser = make_arg_parser(parser)
+    args = parser.parse_args()
+    validate_parsed_serve_args(args)
+
+    uvloop.run(run_server(args))
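The module doubles as a script, so the server can be launched directly (the model name below is a placeholder; 'vllm serve' is the equivalent console entry point installed with the wheel):

    # python -m vllm.entrypoints.openai.api_server --model <model> --port 8000
    # vllm serve <model> --port 8000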