vllm-cpu 0.9.2.post2 (cp311-cp311-manylinux_2_17_aarch64.whl)
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
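Every entry in this listing shows only added lines ("+N -0"), i.e. each file is compared against an empty base. As a rough illustration, a similar per-file listing can be produced locally by opening the wheel (a plain zip archive) and counting lines per member. The sketch below is not how the registry generates this page; it assumes the wheel has been downloaded to the working directory under its normalized filename.

```python
# Minimal sketch (assumed local path): enumerate the files inside a wheel and
# print a "+N -0" style line count for each text member, mirroring the listing below.
from zipfile import ZipFile

WHEEL = "vllm_cpu-0.9.2.post2-cp311-cp311-manylinux_2_17_aarch64.whl"  # assumed filename

with ZipFile(WHEEL) as wheel:
    for name in sorted(wheel.namelist()):
        if name.endswith((".py", ".md", ".json", ".txt")):
            with wheel.open(name) as member:
                added = sum(1 for _ in member)  # every line counts as added vs. an empty base
            print(f"- {name} +{added} -0")
        else:
            print(f"- {name} +0 -0")  # binary assets (.so, .jpg, ...) carry no line counts
```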
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +214 -0
- vllm/_custom_ops.py +1915 -0
- vllm/_ipex_ops.py +350 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +139 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +325 -0
- vllm/attention/backends/blocksparse_attn.py +465 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
- vllm/attention/backends/flash_attn.py +1008 -0
- vllm/attention/backends/flashinfer.py +1107 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +318 -0
- vllm/attention/backends/ipex_attn.py +403 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1391 -0
- vllm/attention/backends/pallas.py +356 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +1015 -0
- vllm/attention/backends/torch_sdpa.py +707 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +807 -0
- vllm/attention/layer.py +481 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +903 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/pallas_kv_cache_update.py +120 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +984 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +738 -0
- vllm/attention/selector.py +214 -0
- vllm/attention/utils/fa_utils.py +72 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1441 -0
- vllm/benchmarks/endpoint_request_func.py +393 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1063 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +610 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +564 -0
- vllm/compilation/counter.py +41 -0
- vllm/compilation/cuda_piecewise_backend.py +218 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +645 -0
- vllm/compilation/fusion_attn.py +166 -0
- vllm/compilation/fx_utils.py +84 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +165 -0
- vllm/compilation/pass_manager.py +82 -0
- vllm/compilation/sequence_parallelism.py +482 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +70 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4913 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +525 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +139 -0
- vllm/core/placeholder_block_space_manager.py +103 -0
- vllm/core/scheduler.py +2126 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +194 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +432 -0
- vllm/distributed/eplb/rebalance_algo.py +234 -0
- vllm/distributed/eplb/rebalance_execute.py +307 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1385 -0
- vllm/distributed/tpu_distributed_utils.py +178 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1801 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2101 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +326 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1278 -0
- vllm/entrypoints/cli/__init__.py +12 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +58 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +36 -0
- vllm/entrypoints/cli/main.py +71 -0
- vllm/entrypoints/cli/openai.py +201 -0
- vllm/entrypoints/cli/run_batch.py +69 -0
- vllm/entrypoints/cli/serve.py +265 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1599 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1495 -0
- vllm/entrypoints/openai/cli_args.py +331 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +2096 -0
- vllm/entrypoints/openai/run_batch.py +473 -0
- vllm/entrypoints/openai/serving_chat.py +1258 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +618 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +988 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +234 -0
- vllm/entrypoints/openai/serving_score.py +431 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +132 -0
- vllm/entrypoints/openai/speech_to_text.py +395 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +262 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +1029 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +185 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +924 -0
- vllm/inputs/registry.py +245 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +81 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +256 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +208 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +420 -0
- vllm/model_executor/layers/fused_moe/__init__.py +78 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
- vllm/model_executor/layers/fused_moe/config.py +456 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
- vllm/model_executor/layers/fused_moe/layer.py +1528 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
- vllm/model_executor/layers/fused_moe/utils.py +144 -0
- vllm/model_executor/layers/layernorm.py +287 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1547 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +473 -0
- vllm/model_executor/layers/quantization/__init__.py +160 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +228 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +164 -0
- vllm/model_executor/layers/quantization/bitblas.py +462 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepgemm.py +83 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +204 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +950 -0
- vllm/model_executor/layers/quantization/gguf.py +577 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +263 -0
- vllm/model_executor/layers/quantization/modelopt.py +747 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +437 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/rtn.py +289 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +212 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +2025 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +116 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +77 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +602 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
- vllm/model_executor/model_loader/tpu.py +113 -0
- vllm/model_executor/model_loader/utils.py +315 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +30 -0
- vllm/model_executor/models/adapters.py +375 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +670 -0
- vllm/model_executor/models/aya_vision.py +486 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +558 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +513 -0
- vllm/model_executor/models/bert_with_rope.py +617 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +728 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1146 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +471 -0
- vllm/model_executor/models/config.py +200 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +281 -0
- vllm/model_executor/models/deepseek_v2.py +935 -0
- vllm/model_executor/models/deepseek_vl2.py +660 -0
- vllm/model_executor/models/dots1.py +536 -0
- vllm/model_executor/models/eagle.py +261 -0
- vllm/model_executor/models/ernie45.py +43 -0
- vllm/model_executor/models/ernie45_moe.py +583 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +708 -0
- vllm/model_executor/models/florence2.py +1113 -0
- vllm/model_executor/models/fuyu.py +406 -0
- vllm/model_executor/models/gemma.py +427 -0
- vllm/model_executor/models/gemma2.py +427 -0
- vllm/model_executor/models/gemma3.py +535 -0
- vllm/model_executor/models/gemma3_mm.py +729 -0
- vllm/model_executor/models/gemma3n.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4_1v.py +1590 -0
- vllm/model_executor/models/glm4v.py +657 -0
- vllm/model_executor/models/gpt2.py +382 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +790 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +653 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +549 -0
- vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +786 -0
- vllm/model_executor/models/interfaces.py +681 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1432 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/keye.py +1736 -0
- vllm/model_executor/models/kimi_vl.py +585 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +531 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +887 -0
- vllm/model_executor/models/llava_next.py +604 -0
- vllm/model_executor/models/llava_next_video.py +492 -0
- vllm/model_executor/models/llava_onevision.py +985 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +320 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +772 -0
- vllm/model_executor/models/minicpmv.py +1307 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +374 -0
- vllm/model_executor/models/mistral3.py +624 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1682 -0
- vllm/model_executor/models/mllama4.py +947 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +339 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1576 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +588 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +577 -0
- vllm/model_executor/models/paligemma.py +419 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +733 -0
- vllm/model_executor/models/phi4mm.py +1258 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +674 -0
- vllm/model_executor/models/pixtral.py +1329 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +501 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
- vllm/model_executor/models/qwen2_5_vl.py +1175 -0
- vllm/model_executor/models/qwen2_audio.py +420 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +122 -0
- vllm/model_executor/models/qwen2_vl.py +1513 -0
- vllm/model_executor/models/qwen3.py +325 -0
- vllm/model_executor/models/qwen3_moe.py +541 -0
- vllm/model_executor/models/qwen_vl.py +796 -0
- vllm/model_executor/models/registry.py +634 -0
- vllm/model_executor/models/roberta.py +271 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +961 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +652 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +509 -0
- vllm/model_executor/models/ultravox.py +670 -0
- vllm/model_executor/models/utils.py +744 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +886 -0
- vllm/model_executor/models/zamba2.py +1036 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +80 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +116 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +91 -0
- vllm/multimodal/image.py +103 -0
- vllm/multimodal/inputs.py +878 -0
- vllm/multimodal/parse.py +499 -0
- vllm/multimodal/processing.py +1948 -0
- vllm/multimodal/profiling.py +283 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +492 -0
- vllm/multimodal/video.py +227 -0
- vllm/outputs.py +516 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +281 -0
- vllm/platforms/cuda.py +568 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +551 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +453 -0
- vllm/platforms/tpu.py +206 -0
- vllm/platforms/xpu.py +192 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +64 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +922 -0
- vllm/transformers_utils/configs/__init__.py +57 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +259 -0
- vllm/transformers_utils/configs/nvlm_d.py +31 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +94 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +259 -0
- vllm/utils/__init__.py +3008 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +184 -0
- vllm/v1/attention/backends/flash_attn.py +757 -0
- vllm/v1/attention/backends/flashinfer.py +680 -0
- vllm/v1/attention/backends/flex_attention.py +491 -0
- vllm/v1/attention/backends/mamba_attn.py +192 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +978 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
- vllm/v1/attention/backends/mla/flashmla.py +180 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
- vllm/v1/attention/backends/mla/triton_mla.py +177 -0
- vllm/v1/attention/backends/pallas.py +320 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
- vllm/v1/attention/backends/triton_attn.py +449 -0
- vllm/v1/attention/backends/utils.py +310 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +254 -0
- vllm/v1/core/kv_cache_coordinator.py +369 -0
- vllm/v1/core/kv_cache_manager.py +398 -0
- vllm/v1/core/kv_cache_utils.py +999 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +157 -0
- vllm/v1/core/sched/request_queue.py +224 -0
- vllm/v1/core/sched/scheduler.py +1115 -0
- vllm/v1/core/sched/utils.py +36 -0
- vllm/v1/core/single_type_kv_cache_manager.py +444 -0
- vllm/v1/engine/__init__.py +179 -0
- vllm/v1/engine/async_llm.py +626 -0
- vllm/v1/engine/coordinator.py +278 -0
- vllm/v1/engine/core.py +1046 -0
- vllm/v1/engine/core_client.py +1049 -0
- vllm/v1/engine/detokenizer.py +292 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +322 -0
- vllm/v1/engine/logprobs.py +200 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +477 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +422 -0
- vllm/v1/engine/utils.py +546 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +532 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +223 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +557 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +240 -0
- vllm/v1/outputs.py +124 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +17 -0
- vllm/v1/request.py +229 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor.py +517 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +43 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +226 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +441 -0
- vllm/v1/spec_decode/medusa.py +64 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +41 -0
- vllm/v1/structured_output/__init__.py +227 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +377 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +91 -0
- vllm/v1/worker/cpu_worker.py +153 -0
- vllm/v1/worker/gpu_input_batch.py +757 -0
- vllm/v1/worker/gpu_model_runner.py +2739 -0
- vllm/v1/worker/gpu_worker.py +408 -0
- vllm/v1/worker/lora_model_runner_mixin.py +177 -0
- vllm/v1/worker/tpu_input_batch.py +585 -0
- vllm/v1/worker/tpu_model_runner.py +1849 -0
- vllm/v1/worker/tpu_worker.py +315 -0
- vllm/v1/worker/utils.py +112 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/v1/worker/xpu_model_runner.py +33 -0
- vllm/v1/worker/xpu_worker.py +165 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +452 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
- vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
- vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
- vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
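
The block-quantization config entries in the manifest above all follow one naming scheme: each JSON file is keyed by the GEMM dimensions (N, K), the device name, the weight/activation dtype, and the quantization block shape. The following short Python sketch is purely illustrative (the parser is not part of vLLM); it only makes that filename convention explicit:

import re

# Illustrative only: split one of the tuned-config filenames listed above into its parts.
_CONFIG_RE = re.compile(
    r"N=(?P<N>\d+),K=(?P<K>\d+),device_name=(?P<device>[^,]+),"
    r"dtype=(?P<dtype>[^,]+),block_shape=\[(?P<block_n>\d+),(?P<block_k>\d+)\]\.json")

def parse_config_name(filename: str) -> dict:
    match = _CONFIG_RE.fullmatch(filename)
    if match is None:
        raise ValueError(f"Unrecognized config filename: {filename}")
    parts = match.groupdict()
    return {
        "N": int(parts["N"]),
        "K": int(parts["K"]),
        "device_name": parts["device"],
        "dtype": parts["dtype"],
        "block_shape": [int(parts["block_n"]), int(parts["block_k"])],
    }

print(parse_config_name(
    "N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json"))
# -> {'N': 36864, 'K': 7168, 'device_name': 'NVIDIA_H200',
#     'dtype': 'fp8_w8a8', 'block_shape': [128, 128]}
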
vllm/entrypoints/llm.py
ADDED
@@ -0,0 +1,1599 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import itertools
import warnings
from collections.abc import Sequence
from contextlib import contextmanager
from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union,
                    cast, overload)

import cloudpickle
import torch.nn as nn
from pydantic import ValidationError
from tqdm.auto import tqdm
from typing_extensions import TypeVar, deprecated

from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
                              BeamSearchSequence,
                              create_sort_beams_key_function)
from vllm.config import (CompilationConfig, ModelDType, TokenizerMode,
                         is_init_field)
from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
                                   TaskOption)
from vllm.engine.llm_engine import LLMEngine
from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
                                         ChatTemplateContentFormatOption,
                                         apply_hf_chat_template,
                                         apply_mistral_chat_template,
                                         parse_chat_messages,
                                         resolve_chat_template_content_format)
from vllm.entrypoints.score_utils import (_cosine_similarity,
                                          _validate_score_input_lens)
from vllm.entrypoints.utils import _validate_truncation_size
from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
from vllm.inputs.parse import parse_and_batch_prompt
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.model_executor.guided_decoding.guided_fields import (
    GuidedDecodingRequest, LLMGuidedOptions)
from vllm.model_executor.layers.quantization import QuantizationMethods
from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
                          PoolingRequestOutput, RequestOutput,
                          ScoringRequestOutput)
from vllm.pooling_params import PoolingParams
from vllm.prompt_adapter.request import PromptAdapterRequest
from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
                                  RequestOutputKind, SamplingParams)
from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
                                               get_cached_tokenizer)
from vllm.usage.usage_lib import UsageContext
from vllm.utils import Counter, Device, deprecate_kwargs, is_list_of

if TYPE_CHECKING:
    from vllm.v1.metrics.reader import Metric

logger = init_logger(__name__)

_R = TypeVar("_R", default=Any)


class LLM:
    """An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer. Expect valid prompt_token_ids and None for prompt
            from the input.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        allowed_local_media_path: Allowing API requests to read local images
            or videos from directories specified by the server file system.
            This is a security risk. Should only be enabled in trusted
            environments.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `torch_dtype` attribute specified in the model config file.
            However, if the `torch_dtype` in the config is `float32`, we will
            use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq", and "fp8" (experimental).
            If None, we first check the `quantization_config` attribute in the
            model config file. If that is None, we assume the model weights are
            not quantized and use `dtype` to determine the data type of
            the weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the model's
            throughput. However, if the value is too high, it may cause out-of-
            memory (OOM) errors.
        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
            This can be used for temporarily storing the states of the requests
            when their `best_of` sampling parameters are larger than 1. If all
            requests will have `best_of=1`, you can safely set this to 0.
            Noting that `best_of` is only supported in V0. Otherwise, too small
            values may cause out-of-memory (OOM) errors.
        cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
            the model weights. This virtually increases the GPU memory space
            you can use to hold the model weights, at the cost of CPU-GPU data
            transfer for every forward pass.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode. Additionally for encoder-decoder models, if the
            sequence length of the encoder input is larger than this, we fall
            back to the eager mode.
        disable_custom_all_reduce: See
            [ParallelConfig][vllm.config.ParallelConfig].
        disable_async_output_proc: Disable async output processing.
            This may result in lower performance.
        hf_token: The token to use as HTTP bearer authorization for remote files
            . If `True`, will use the token generated when running
            `huggingface-cli login` (stored in `~/.huggingface`).
        hf_overrides: If a dictionary, contains arguments to be forwarded to the
            HuggingFace config. If a callable, it is called to update the
            HuggingFace config.
        mm_processor_kwargs: Arguments to be forwarded to the model's processor
            for multi-modal data, e.g., image processor. Overrides for the
            multi-modal processor obtained from `AutoProcessor.from_pretrained`.
            The available overrides depend on the model that is being run.
            For example, for Phi-3-Vision: `{"num_crops": 4}`.
        override_pooler_config: Initialize non-default pooling config or
            override default pooling config for the pooling model.
            e.g. `PoolerConfig(pooling_type="mean", normalize=False)`.
        compilation_config: Either an integer or a dictionary. If it is an
            integer, it is used as the level of compilation optimization. If it
            is a dictionary, it can specify the full compilation configuration.
        **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].

    Note:
        This class is intended to be used for offline inference. For online
        serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
    """

    DEPRECATE_LEGACY: ClassVar[bool] = True
    """A flag to toggle whether to deprecate the legacy generate/encode API."""

    @classmethod
    @contextmanager
    def deprecate_legacy_api(cls):
        cls.DEPRECATE_LEGACY = True

        yield

        cls.DEPRECATE_LEGACY = False

    def __init__(
        self,
        model: str,
        *,
        task: TaskOption = "auto",
        tokenizer: Optional[str] = None,
        tokenizer_mode: TokenizerMode = "auto",
        skip_tokenizer_init: bool = False,
        trust_remote_code: bool = False,
        allowed_local_media_path: str = "",
        tensor_parallel_size: int = 1,
        dtype: ModelDType = "auto",
        quantization: Optional[QuantizationMethods] = None,
        revision: Optional[str] = None,
        tokenizer_revision: Optional[str] = None,
        seed: Optional[int] = None,
        gpu_memory_utilization: float = 0.9,
        swap_space: float = 4,
        cpu_offload_gb: float = 0,
        enforce_eager: bool = False,
        max_seq_len_to_capture: int = 8192,
        disable_custom_all_reduce: bool = False,
        disable_async_output_proc: bool = False,
        hf_token: Optional[Union[bool, str]] = None,
        hf_overrides: Optional[HfOverrides] = None,
        mm_processor_kwargs: Optional[dict[str, Any]] = None,
        override_pooler_config: Optional[PoolerConfig] = None,
        compilation_config: Optional[Union[int, dict[str, Any],
                                           CompilationConfig]] = None,
        **kwargs,
    ) -> None:
        """LLM constructor."""

        if "disable_log_stats" not in kwargs:
            kwargs["disable_log_stats"] = True

        if "worker_cls" in kwargs:
            worker_cls = kwargs["worker_cls"]
            # if the worker_cls is not qualified string name,
            # we serialize it using cloudpickle to avoid pickling issues
            if isinstance(worker_cls, type):
                kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)

        if "kv_transfer_config" in kwargs and isinstance(
                kwargs["kv_transfer_config"], dict):
            from vllm.config import KVTransferConfig
            raw_config_dict = kwargs["kv_transfer_config"]
            try:
                kwargs["kv_transfer_config"] = KVTransferConfig(
                    **raw_config_dict)
            except ValidationError as e:
                logger.error(
                    "Failed to convert 'kv_transfer_config' dict to "
                    "KVTransferConfig object. Dict: %s. Error: %s",
                    raw_config_dict, e)
                # Consider re-raising a more specific vLLM error or ValueError
                # to provide better context to the user.
                raise ValueError(
                    f"Invalid 'kv_transfer_config' provided: {e}") from e

        if hf_overrides is None:
            hf_overrides = {}

        if compilation_config is not None:
            if isinstance(compilation_config, int):
                compilation_config_instance = CompilationConfig(
                    level=compilation_config)
            elif isinstance(compilation_config, dict):
                predicate = lambda x: is_init_field(CompilationConfig, x[0])
                compilation_config_instance = CompilationConfig(
                    **dict(filter(predicate, compilation_config.items())))
            else:
                compilation_config_instance = compilation_config
        else:
            compilation_config_instance = CompilationConfig()

        engine_args = EngineArgs(
            model=model,
            task=task,
            tokenizer=tokenizer,
            tokenizer_mode=tokenizer_mode,
            skip_tokenizer_init=skip_tokenizer_init,
            trust_remote_code=trust_remote_code,
            allowed_local_media_path=allowed_local_media_path,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            quantization=quantization,
            revision=revision,
            tokenizer_revision=tokenizer_revision,
            seed=seed,
            gpu_memory_utilization=gpu_memory_utilization,
            swap_space=swap_space,
            cpu_offload_gb=cpu_offload_gb,
            enforce_eager=enforce_eager,
            max_seq_len_to_capture=max_seq_len_to_capture,
            disable_custom_all_reduce=disable_custom_all_reduce,
            disable_async_output_proc=disable_async_output_proc,
            hf_token=hf_token,
            hf_overrides=hf_overrides,
            mm_processor_kwargs=mm_processor_kwargs,
            override_pooler_config=override_pooler_config,
            compilation_config=compilation_config_instance,
            **kwargs,
        )

        # Create the Engine (autoselects V0 vs V1)
        self.llm_engine = LLMEngine.from_engine_args(
            engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
        self.engine_class = type(self.llm_engine)

        self.request_counter = Counter()
        self.default_sampling_params: Union[dict[str, Any], None] = None

    def get_tokenizer(
        self,
        lora_request: Optional[LoRARequest] = None,
    ) -> AnyTokenizer:
        return self.llm_engine.get_tokenizer_group().get_lora_tokenizer(
            lora_request)

    def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
        tokenizer_group = self.llm_engine.get_tokenizer_group()

        # While CachedTokenizer is dynamic, have no choice but
        # compare class name. Misjudgment will arise from
        # user-defined tokenizer started with 'Cached'
        if tokenizer.__class__.__name__.startswith("Cached"):
            tokenizer_group.tokenizer = tokenizer
        else:
            tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)

    def get_default_sampling_params(self) -> SamplingParams:
        if self.default_sampling_params is None:
            self.default_sampling_params = (
                self.llm_engine.model_config.get_diff_sampling_param())
        if self.default_sampling_params:
            return SamplingParams.from_optional(**self.default_sampling_params)
        return SamplingParams()

    @overload
    def generate(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        /,
        sampling_params: Optional[Union[SamplingParams,
                                        Sequence[SamplingParams]]] = None,
        *,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: str,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        prompt_token_ids: Optional[list[int]] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: multi (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: list[str],
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        prompt_token_ids: Optional[list[list[int]]] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: Optional[str] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        *,
        prompt_token_ids: list[int],
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: multi (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: Optional[list[str]] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        *,
        prompt_token_ids: list[list[int]],
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single or multi token ids [pos-only]
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: None,
        sampling_params: None,
        prompt_token_ids: Union[list[int], list[list[int]]],
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @deprecate_kwargs(
        "prompt_token_ids",
        is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
        additional_message="Please use the 'prompts' parameter instead.",
    )
    def generate(
        self,
        prompts: Union[Union[PromptType, Sequence[PromptType]],
                       Optional[Union[str, list[str]]]] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        Sequence[SamplingParams]]] = None,
        prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
        priority: Optional[list[int]] = None,
    ) -> list[RequestOutput]:
        """Generates the completions for the input prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters.
                When it is a single value, it is applied to every prompt.
                When it is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.
            priority: The priority of the requests, if any.
                Only applicable when priority scheduling policy is enabled.

        Returns:
            A list of `RequestOutput` objects containing the
            generated completions in the same order as the input prompts.

        Note:
            Using `prompts` and `prompt_token_ids` as keyword parameters is
            considered legacy and may be deprecated in the future. You should
            instead pass them via the `inputs` parameter.
        """
        runner_type = self.llm_engine.model_config.runner_type
        if runner_type not in ["generate", "transcription"]:
            messages = [
                "LLM.generate() is only supported for (conditional) generation "
                "models (XForCausalLM, XForConditionalGeneration).",
            ]

            supported_runner_types = self.llm_engine.model_config \
                .supported_runner_types
            if "generate" in supported_runner_types:
                messages.append(
                    "Your model supports the 'generate' runner, but is "
                    f"currently initialized for the '{runner_type}' runner. "
                    "Please initialize vLLM using `--task generate`.")

            raise ValueError(" ".join(messages))

        if prompt_token_ids is not None:
            parsed_prompts = self._convert_v1_inputs(
                prompts=cast(Optional[Union[str, list[str]]], prompts),
|
|
474
|
+
prompt_token_ids=prompt_token_ids,
|
|
475
|
+
)
|
|
476
|
+
else:
|
|
477
|
+
parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
|
|
478
|
+
prompts)
|
|
479
|
+
|
|
480
|
+
if isinstance(guided_options_request, dict):
|
|
481
|
+
if len(guided_options_request) > 1:
|
|
482
|
+
raise ValueError(
|
|
483
|
+
"You can only use one guided decoding but multiple is "
|
|
484
|
+
f"specified: {guided_options_request}")
|
|
485
|
+
guided_options_request = GuidedDecodingRequest(
|
|
486
|
+
**guided_options_request)
|
|
487
|
+
|
|
488
|
+
if sampling_params is None:
|
|
489
|
+
# Use default sampling params.
|
|
490
|
+
sampling_params = self.get_default_sampling_params()
|
|
491
|
+
|
|
492
|
+
tokenization_kwargs: dict[str, Any] = {}
|
|
493
|
+
truncate_prompt_tokens = None
|
|
494
|
+
if isinstance(sampling_params, SamplingParams):
|
|
495
|
+
truncate_prompt_tokens = sampling_params.truncate_prompt_tokens
|
|
496
|
+
_validate_truncation_size(self.llm_engine.model_config.max_model_len,
|
|
497
|
+
truncate_prompt_tokens, tokenization_kwargs)
|
|
498
|
+
|
|
499
|
+
self._validate_and_add_requests(
|
|
500
|
+
prompts=parsed_prompts,
|
|
501
|
+
params=sampling_params,
|
|
502
|
+
use_tqdm=use_tqdm,
|
|
503
|
+
lora_request=lora_request,
|
|
504
|
+
prompt_adapter_request=prompt_adapter_request,
|
|
505
|
+
guided_options=guided_options_request,
|
|
506
|
+
tokenization_kwargs=tokenization_kwargs,
|
|
507
|
+
priority=priority,
|
|
508
|
+
)
|
|
509
|
+
|
|
510
|
+
outputs = self._run_engine(use_tqdm=use_tqdm)
|
|
511
|
+
return self.engine_class.validate_outputs(outputs, RequestOutput)
|
|
512
|
+
|
|
513
|
+
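A minimal offline usage sketch for the `generate()` API above. The model name is an arbitrary example and not taken from this diff; any generative checkpoint supported by vLLM works the same way.

# Illustrative example of LLM.generate(); model name is a placeholder.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)
outputs = llm.generate(
    ["Hello, my name is", "The capital of France is"], params)
for output in outputs:
    # Each RequestOutput keeps the prompt and its generated completions.
    print(output.outputs[0].text)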
    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        """
        Execute an RPC call on all workers.

        Args:
            method: Name of the worker method to execute, or a callable that
                is serialized and sent to all workers to execute.

                If the method is a callable, it should accept an additional
                `self` argument, in addition to the arguments passed in `args`
                and `kwargs`. The `self` argument will be the worker object.
            timeout: Maximum time in seconds to wait for execution. Raises a
                [`TimeoutError`][] on timeout. `None` means wait indefinitely.
            args: Positional arguments to pass to the worker method.
            kwargs: Keyword arguments to pass to the worker method.

        Returns:
            A list containing the results from each worker.

        Note:
            It is recommended to use this API to only pass control messages,
            and set up data-plane communication to pass data.
        """

        return self.llm_engine.collective_rpc(method, timeout, args, kwargs)

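A sketch of the callable form of `collective_rpc()`. The function is shipped to every worker process and receives the worker object as its first argument; the body below only inspects the worker's class name, so it makes no assumptions about worker internals.

# Illustrative example of the callable RPC path; `llm` is an existing LLM.
def worker_class_name(worker) -> str:
    # Runs remotely inside each worker; `worker` is the worker object.
    return type(worker).__name__

names = llm.collective_rpc(worker_class_name)
print(names)  # one entry per worker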
    def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
        """
        Run a function directly on the model inside each worker,
        returning the result for each of them.
        """
        executor = self.llm_engine.model_executor
        return executor.apply_model(func)

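For example, `apply_model()` can inspect the loaded `nn.Module` on every worker. This is a sketch; `llm` is assumed to be an already constructed `LLM` instance.

# Count parameters of the model held by each worker (illustrative).
param_counts = llm.apply_model(
    lambda model: sum(p.numel() for p in model.parameters()))
print(param_counts)  # one count per worker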
    def _get_beam_search_lora_requests(
        self,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]],
        prompts: list[Union[TokensPrompt, TextPrompt]],
    ) -> list[Optional[LoRARequest]]:
        """Get the optional lora request corresponding to each prompt."""
        if isinstance(lora_request,
                      Sequence) and len(lora_request) != len(prompts):
            raise ValueError(
                "Lora request list should be the same length as the prompts")

        if lora_request is None or isinstance(lora_request, LoRARequest):
            return [lora_request] * len(prompts)

        raise TypeError(f"Invalid lora_request type {type(lora_request)}")

    def beam_search(
        self,
        prompts: list[Union[TokensPrompt, TextPrompt]],
        params: BeamSearchParams,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        use_tqdm: bool = False,
    ) -> list[BeamSearchOutput]:
        """
        Generate sequences using beam search.

        Args:
            prompts: A list of prompts. Each prompt can be a string or a list
                of token IDs.
            params: The beam search parameters.
            lora_request: LoRA request to use for generation, if any.
            use_tqdm: Whether to use tqdm to display the progress bar.
        """
        # TODO: how does beam search work together with length penalty,
        # frequency penalty, and stopping criteria, etc.?
        beam_width = params.beam_width
        max_tokens = params.max_tokens
        temperature = params.temperature
        ignore_eos = params.ignore_eos
        length_penalty = params.length_penalty

        lora_requests = self._get_beam_search_lora_requests(
            lora_request, prompts)

        tokenizer = self.get_tokenizer()
        sort_beams_key = create_sort_beams_key_function(
            tokenizer.eos_token_id,
            length_penalty,
        )

        def create_tokens_prompt_from_beam(
                beam: BeamSearchSequence) -> TokensPrompt:
            token_prompt_kwargs: TokensPrompt = {
                "prompt_token_ids": beam.tokens
            }
            if beam.multi_modal_data is not None:
                token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data

            if beam.mm_processor_kwargs is not None:
                token_prompt_kwargs[
                    "mm_processor_kwargs"] = beam.mm_processor_kwargs
            return TokensPrompt(**token_prompt_kwargs)

        # generate 2 * beam_width candidates at each step
        # following the huggingface transformers implementation
        # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
        beam_search_params = SamplingParams(logprobs=2 * beam_width,
                                            max_tokens=1,
                                            temperature=temperature)
        instances: list[BeamSearchInstance] = []

        for lora_req, prompt in zip(lora_requests, prompts):
            # Add multimodal processor kwargs & data
            mm_kwargs = {}
            if "multi_modal_data" in prompt:
                mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
            if "mm_processor_kwargs" in prompt:
                mm_kwargs["mm_processor_kwargs"] = prompt[
                    "mm_processor_kwargs"]

            if "prompt_token_ids" in prompt:
                prompt = cast(TokensPrompt, prompt)  # Needed for mypy
                prompt_tokens = prompt["prompt_token_ids"]
            else:
                prompt_tokens = tokenizer.encode(prompt["prompt"])

            instances.append(
                BeamSearchInstance(
                    prompt_tokens,
                    lora_request=lora_req,
                    logprobs=None,
                    **mm_kwargs,
                ), )

        token_iter = range(max_tokens)
        if use_tqdm:
            token_iter = tqdm(token_iter,
                              desc="Beam search",
                              unit="token",
                              unit_scale=False)
            logger.warning(
                "The progress bar shows the upper bound on token steps and "
                "may finish early due to stopping conditions. It does not "
                "reflect instance-level progress.")

        for _ in token_iter:
            all_beams: list[BeamSearchSequence] = list(
                sum((instance.beams for instance in instances), []))
            pos = [0] + list(
                itertools.accumulate(
                    len(instance.beams) for instance in instances))
            instance_start_and_end: list[tuple[int, int]] = list(
                zip(pos[:-1], pos[1:]))

            if len(all_beams) == 0:
                break

            # create the corresponding batch entries for prompt & optional lora
            prompts_batch, lora_req_batch = zip(
                *[(create_tokens_prompt_from_beam(beam), beam.lora_request)
                  for beam in all_beams])

            # only runs for one step
            # we don't need to use tqdm here
            output = self.generate(prompts_batch,
                                   sampling_params=beam_search_params,
                                   use_tqdm=False,
                                   lora_request=lora_req_batch)

            for (start, end), instance in zip(instance_start_and_end,
                                              instances):
                instance_new_beams = []
                for i in range(start, end):
                    current_beam = all_beams[i]
                    result = output[i]

                    if result.outputs[0].logprobs is not None:
                        # if `result.outputs[0].logprobs` is None, it means
                        # the sequence is completed because of the
                        # max-model-len or was aborted; we don't need to add
                        # it to the new beams.
                        logprobs = result.outputs[0].logprobs[0]
                        for token_id, logprob_obj in logprobs.items():
                            new_beam = BeamSearchSequence(
                                tokens=current_beam.tokens + [token_id],
                                logprobs=current_beam.logprobs + [logprobs],
                                lora_request=current_beam.lora_request,
                                cum_logprob=current_beam.cum_logprob +
                                logprob_obj.logprob,
                                multi_modal_data=current_beam.multi_modal_data,
                                mm_processor_kwargs=current_beam.
                                mm_processor_kwargs)

                            if token_id == tokenizer.eos_token_id and \
                                not ignore_eos:
                                instance.completed.append(new_beam)
                            else:
                                instance_new_beams.append(new_beam)
                sorted_beams = sorted(instance_new_beams,
                                      key=sort_beams_key,
                                      reverse=True)
                instance.beams = sorted_beams[:beam_width]

        outputs = []
        for instance in instances:
            instance.completed.extend(instance.beams)
            sorted_completed = sorted(instance.completed,
                                      key=sort_beams_key,
                                      reverse=True)
            best_beams = sorted_completed[:beam_width]

            for beam in best_beams:
                beam.text = tokenizer.decode(beam.tokens)
            outputs.append(BeamSearchOutput(sequences=best_beams))

        return outputs

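A small usage sketch for `beam_search()`. `BeamSearchParams` is assumed to be importable from `vllm.sampling_params` in this release, and the prompt dict form follows the `TextPrompt` shape accepted above.

# Illustrative example of LLM.beam_search(); `llm` is an existing LLM.
from vllm.sampling_params import BeamSearchParams

beam_outputs = llm.beam_search(
    [{"prompt": "The capital of France is"}],
    BeamSearchParams(beam_width=4, max_tokens=16),
)
# Each BeamSearchOutput holds the surviving beams, best first.
print(beam_outputs[0].sequences[0].text)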
    def chat(
        self,
        messages: Union[list[ChatCompletionMessageParam],
                        list[list[ChatCompletionMessageParam]]],
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[LoRARequest] = None,
        chat_template: Optional[str] = None,
        chat_template_content_format: ChatTemplateContentFormatOption = "auto",
        add_generation_prompt: bool = True,
        continue_final_message: bool = False,
        tools: Optional[list[dict[str, Any]]] = None,
        chat_template_kwargs: Optional[dict[str, Any]] = None,
        mm_processor_kwargs: Optional[dict[str, Any]] = None,
    ) -> list[RequestOutput]:
        """
        Generate responses for a chat conversation.

        The chat conversation is converted into a text prompt using the
        tokenizer, and the [generate][] method is called to generate the
        responses.

        Multi-modal inputs can be passed in the same way you would pass them
        to the OpenAI API.

        Args:
            messages: A list of conversations or a single conversation.

                - Each conversation is represented as a list of messages.
                - Each message is a dictionary with 'role' and 'content' keys.

            sampling_params: The sampling parameters for text generation.
                If None, we use the default sampling parameters. When it
                is a single value, it is applied to every prompt. When it
                is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            chat_template: The template to use for structuring the chat.
                If not provided, the model's default chat template will be
                used.
            chat_template_content_format: The format to render message content.

                - "string" will render the content as a string.
                  Example: `"Who are you?"`
                - "openai" will render the content as a list of dictionaries,
                  similar to OpenAI schema.
                  Example: `[{"type": "text", "text": "Who are you?"}]`

            add_generation_prompt: If True, adds a generation template
                to each message.
            continue_final_message: If True, continues the final message in
                the conversation instead of starting a new one. Cannot be
                `True` if `add_generation_prompt` is also `True`.
            chat_template_kwargs: Additional kwargs to pass to the chat
                template.
            mm_processor_kwargs: Multimodal processor kwarg overrides for this
                chat request. Only used for offline requests.

        Returns:
            A list of `RequestOutput` objects containing the generated
            responses in the same order as the input messages.
        """
        list_of_messages: list[list[ChatCompletionMessageParam]]

        # Handle multi and single conversations
        if is_list_of(messages, list):
            # messages is list[list[...]]
            list_of_messages = cast(list[list[ChatCompletionMessageParam]],
                                    messages)
        else:
            # messages is list[...]
            list_of_messages = [
                cast(list[ChatCompletionMessageParam], messages)
            ]

        tokenizer = self.get_tokenizer(lora_request)
        model_config = self.llm_engine.get_model_config()
        resolved_content_format = resolve_chat_template_content_format(
            chat_template,
            tools,
            chat_template_content_format,
            tokenizer,
            model_config=model_config,
        )

        _chat_template_kwargs: dict[str, Any] = dict(
            chat_template=chat_template,
            add_generation_prompt=add_generation_prompt,
            continue_final_message=continue_final_message,
            tools=tools,
        )
        _chat_template_kwargs.update(chat_template_kwargs or {})

        prompts: list[Union[TokensPrompt, TextPrompt]] = []

        for msgs in list_of_messages:
            # NOTE: _parse_chat_message_content_parts() currently doesn't
            # handle mm_processor_kwargs, since there is no implementation in
            # the chat message parsing for it.
            conversation, mm_data = parse_chat_messages(
                msgs,
                model_config,
                tokenizer,
                content_format=resolved_content_format,
            )

            if isinstance(tokenizer, MistralTokenizer):
                prompt_token_ids = apply_mistral_chat_template(
                    tokenizer,
                    messages=msgs,
                    **_chat_template_kwargs,
                )
            else:
                prompt_str = apply_hf_chat_template(
                    tokenizer=tokenizer,
                    conversation=conversation,
                    model_config=model_config,
                    **_chat_template_kwargs,
                )
                # Special tokens are already included in chat templates so
                # should not be added by the tokenizer in this case.
                prompt_token_ids = tokenizer.encode(prompt_str,
                                                    add_special_tokens=False)

            prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)

            if mm_data is not None:
                prompt["multi_modal_data"] = mm_data

            if mm_processor_kwargs is not None:
                prompt["mm_processor_kwargs"] = mm_processor_kwargs

            prompts.append(prompt)

        return self.generate(
            prompts,
            sampling_params=sampling_params,
            use_tqdm=use_tqdm,
            lora_request=lora_request,
        )

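A minimal `chat()` sketch for an instruction-tuned model; it assumes `llm` was built from a chat-capable checkpoint with a chat template.

# Illustrative example of LLM.chat().
messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Name one prime number."},
]
chat_outputs = llm.chat(messages, SamplingParams(max_tokens=16))
print(chat_outputs[0].outputs[0].text)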
    @overload
    def encode(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        /,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        *,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @overload  # LEGACY: single (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def encode(
        self,
        prompts: str,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        prompt_token_ids: Optional[list[int]] = None,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @overload  # LEGACY: multi (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def encode(
        self,
        prompts: list[str],
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        prompt_token_ids: Optional[list[list[int]]] = None,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @overload  # LEGACY: single (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def encode(
        self,
        prompts: Optional[str] = None,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        *,
        prompt_token_ids: list[int],
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @overload  # LEGACY: multi (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def encode(
        self,
        prompts: Optional[list[str]] = None,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        *,
        prompt_token_ids: list[list[int]],
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @overload  # LEGACY: single or multi token ids [pos-only]
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def encode(
        self,
        prompts: None,
        pooling_params: None,
        prompt_token_ids: Union[list[int], list[list[int]]],
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        ...

    @deprecate_kwargs(
        "prompt_token_ids",
        is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
        additional_message="Please use the 'prompts' parameter instead.",
    )
    def encode(
        self,
        prompts: Union[Union[PromptType, Sequence[PromptType]],
                       Optional[Union[str, list[str]]]] = None,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[PoolingRequestOutput]:
        """Apply pooling to the hidden states corresponding to the input
        prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.

        Returns:
            A list of `PoolingRequestOutput` objects containing the
            pooled hidden states in the same order as the input prompts.

        Note:
            Using `prompts` and `prompt_token_ids` as keyword parameters is
            considered legacy and may be deprecated in the future. You should
            instead pass them via the `inputs` parameter.
        """
        runner_type = self.llm_engine.model_config.runner_type
        if runner_type != "pooling":
            messages = ["LLM.encode() is only supported for pooling models."]

            supported_runner_types = self.llm_engine.model_config \
                .supported_runner_types
            if "pooling" in supported_runner_types:
                messages.append(
                    "Your model supports the 'pooling' runner, but is "
                    f"currently initialized for the '{runner_type}' runner. "
                    "Please initialize vLLM using `--task embed`, "
                    "`--task classify`, `--task score` etc.")

            raise ValueError(" ".join(messages))

        if prompt_token_ids is not None:
            parsed_prompts = self._convert_v1_inputs(
                prompts=cast(Optional[Union[str, list[str]]], prompts),
                prompt_token_ids=prompt_token_ids,
            )
        else:
            parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
                                  prompts)

        if pooling_params is None:
            # Use default pooling params.
            pooling_params = PoolingParams()
        elif isinstance(pooling_params, PoolingParams):
            pooling_params.verify(self.llm_engine.model_config)
        else:
            for pooling_param in pooling_params:
                pooling_param.verify(self.llm_engine.model_config)

        tokenization_kwargs: dict[str, Any] = {}
        _validate_truncation_size(self.llm_engine.model_config.max_model_len,
                                  truncate_prompt_tokens, tokenization_kwargs)

        self._validate_and_add_requests(
            prompts=parsed_prompts,
            params=pooling_params,
            use_tqdm=use_tqdm,
            lora_request=lora_request,
            tokenization_kwargs=tokenization_kwargs,
            prompt_adapter_request=prompt_adapter_request,
        )

        outputs = self._run_engine(use_tqdm=use_tqdm)
        return self.engine_class.validate_outputs(outputs,
                                                  PoolingRequestOutput)

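A sketch of `encode()` with a pooling engine. The model name is an arbitrary embedding checkpoint, the engine must be initialized with a pooling task, and the `.data` tensor attribute on the pooled output is assumed from the `PoolingRequestOutput` API.

# Illustrative example of LLM.encode(); model name is a placeholder.
llm = LLM(model="BAAI/bge-base-en-v1.5", task="embed")
pooled = llm.encode(["What is the capital of France?"])
# PoolingRequestOutput.outputs holds the pooled hidden states (assumed .data).
print(pooled[0].outputs.data.shape)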
    def embed(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        /,
        *,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        pooling_params: Optional[Union[PoolingParams,
                                       Sequence[PoolingParams]]] = None,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[EmbeddingRequestOutput]:
        """
        Generate an embedding vector for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.

        Returns:
            A list of `EmbeddingRequestOutput` objects containing the
            embedding vectors in the same order as the input prompts.
        """
        if self.llm_engine.model_config.task != "embed":
            raise ValueError(
                "Embedding API is only enabled for `--task embed`")

        items = self.encode(prompts,
                            truncate_prompt_tokens=truncate_prompt_tokens,
                            use_tqdm=use_tqdm,
                            pooling_params=pooling_params,
                            lora_request=lora_request,
                            prompt_adapter_request=prompt_adapter_request)

        return [EmbeddingRequestOutput.from_base(item) for item in items]

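With the same pooling engine, `embed()` returns the per-prompt embedding vectors directly (a sketch; `llm` is the embedding-task engine from above).

# Illustrative example of LLM.embed().
embeds = llm.embed(["The quick brown fox jumps over the lazy dog"])
vector = embeds[0].outputs.embedding  # list of floats
print(len(vector))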
    def classify(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        /,
        *,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[ClassificationRequestOutput]:
        """
        Generate class logits for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.

        Returns:
            A list of `ClassificationRequestOutput` objects containing the
            class logits in the same order as the input prompts.
        """
        if self.llm_engine.model_config.task != "classify":
            raise ValueError(
                "Classification API is only enabled for `--task classify`")

        items = self.encode(prompts,
                            use_tqdm=use_tqdm,
                            lora_request=lora_request,
                            prompt_adapter_request=prompt_adapter_request)

        return [ClassificationRequestOutput.from_base(item) for item in items]

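A sketch of `classify()` with a sequence-classification checkpoint; the model name is illustrative and the `probs` attribute is assumed from the `ClassificationRequestOutput` API.

# Illustrative example of LLM.classify(); model name is a placeholder.
llm = LLM(model="jason9693/Qwen2.5-1.5B-apeach", task="classify")
results = llm.classify(["vLLM makes offline batch inference easy."])
print(results[0].outputs.probs)  # per-class probabilities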
    def _embedding_score(
        self,
        tokenizer: AnyTokenizer,
        text_1: list[Union[str, TextPrompt, TokensPrompt]],
        text_2: list[Union[str, TextPrompt, TokensPrompt]],
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[ScoringRequestOutput]:

        encoded_output: list[PoolingRequestOutput] = self.encode(
            text_1 + text_2,
            truncate_prompt_tokens=truncate_prompt_tokens,
            use_tqdm=use_tqdm,
            lora_request=lora_request,
            prompt_adapter_request=prompt_adapter_request)

        encoded_output_1: list[PoolingRequestOutput] = encoded_output[
            0:len(text_1)]
        encoded_output_2: list[PoolingRequestOutput] = encoded_output[
            len(text_1):]

        if len(encoded_output_1) == 1:
            encoded_output_1 = encoded_output_1 * len(encoded_output_2)

        scores = _cosine_similarity(tokenizer=tokenizer,
                                    embed_1=encoded_output_1,
                                    embed_2=encoded_output_2)

        items = self.engine_class.validate_outputs(scores,
                                                   PoolingRequestOutput)
        return [ScoringRequestOutput.from_base(item) for item in items]

    def _cross_encoding_score(
        self,
        tokenizer: AnyTokenizer,
        text_1: list[str],
        text_2: list[str],
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[ScoringRequestOutput]:

        if isinstance(tokenizer, MistralTokenizer):
            raise ValueError(
                "Score API is only enabled for `--task embed or score`")

        if len(text_1) == 1:
            text_1 = text_1 * len(text_2)

        input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]

        pooling_params = PoolingParams(use_cross_encoder=True)

        tokenization_kwargs: dict[str, Any] = {}
        _validate_truncation_size(self.llm_engine.model_config.max_model_len,
                                  truncate_prompt_tokens, tokenization_kwargs)

        parsed_prompts = []

        for q, t in input_pairs:
            prompt_inputs = tokenizer(text=q,
                                      text_pair=t,
                                      **tokenization_kwargs)
            engine_prompt = TokensPrompt(
                prompt_token_ids=prompt_inputs["input_ids"],
                token_type_ids=prompt_inputs.get("token_type_ids"))
            parsed_prompts.append(engine_prompt)

        self._validate_and_add_requests(
            prompts=parsed_prompts,
            params=pooling_params,
            use_tqdm=use_tqdm,
            lora_request=lora_request,
            prompt_adapter_request=prompt_adapter_request,
        )

        outputs = self._run_engine(use_tqdm=use_tqdm)
        items = self.engine_class.validate_outputs(outputs,
                                                   PoolingRequestOutput)

        return [ScoringRequestOutput.from_base(item) for item in items]

    def score(
        self,
        text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
        text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
        /,
        *,
        truncate_prompt_tokens: Optional[int] = None,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
    ) -> list[ScoringRequestOutput]:
        """Generate similarity scores for all pairs `<text, text_pair>`.

        The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
        In the `1 -> N` case the `text_1` sentence will be replicated `N`
        times to pair with the `text_2` sentences.
        The input pairs are used to build a list of prompts for the
        cross encoder model. This class automatically batches the prompts,
        considering the memory constraint. For the best performance, put all
        of your texts into a single list and pass it to this method.

        Args:
            text_1: Can be a single prompt or a list of prompts, in which
                case it has to have the same length as the `text_2` list.
            text_2: The texts to pair with the query to form the input
                to the LLM. See [PromptType][vllm.inputs.PromptType] for
                more details about the format of each prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.

        Returns:
            A list of `ScoringRequestOutput` objects containing the
            generated scores in the same order as the input prompts.
        """
        runner_type = self.llm_engine.model_config.runner_type
        if runner_type != "pooling":
            messages = ["LLM.score() is only supported for pooling models."]

            supported_runner_types = self.llm_engine.model_config \
                .supported_runner_types
            if "pooling" in supported_runner_types:
                messages.append(
                    "Your model supports the 'pooling' runner, but is "
                    f"currently initialized for the '{runner_type}' runner. "
                    "Please initialize vLLM using `--task embed`, "
                    "`--task classify`, `--task score` etc.")

            raise ValueError(" ".join(messages))

        if self.llm_engine.model_config.task not in ("embed", "classify"):
            raise ValueError("Score API is only enabled for "
                             "`--task embed or --task classify`.")

        if (self.llm_engine.model_config.task == "classify"
                and self.llm_engine.model_config.hf_config.num_labels != 1):
            raise ValueError("Score API is only enabled for num_labels == 1.")

        # the tokenizer for models such as
        # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
        # lists of tokens to the `text` and `text_pair` kwargs
        tokenizer = self.get_tokenizer()

        def ensure_str(prompt: SingletonPrompt):
            if isinstance(prompt, dict):
                if "multi_modal_data" in prompt:
                    raise ValueError("Multi-modal prompt is not "
                                     "supported for scoring")
                elif "prompt_token_ids" in prompt:
                    prompt = tokenizer.decode(
                        cast(TokensPrompt, prompt)["prompt_token_ids"])
                elif "prompt" in prompt:
                    prompt = cast(TextPrompt, prompt)["prompt"]
            assert type(prompt) is str
            return prompt

        if isinstance(text_1, (str, dict)):
            # Convert a single prompt to a list.
            text_1 = [text_1]
        input_text_1: list[str] = [ensure_str(t) for t in text_1]

        if isinstance(text_2, (str, dict)):
            # Convert a single prompt to a list.
            text_2 = [text_2]
        input_text_2: list[str] = [ensure_str(t) for t in text_2]

        _validate_score_input_lens(input_text_1, input_text_2)

        if self.llm_engine.model_config.is_cross_encoder:
            return self._cross_encoding_score(tokenizer, input_text_1,
                                              input_text_2,
                                              truncate_prompt_tokens, use_tqdm,
                                              lora_request,
                                              prompt_adapter_request)
        else:
            return self._embedding_score(
                tokenizer,
                input_text_1,  # type: ignore[arg-type]
                input_text_2,  # type: ignore[arg-type]
                truncate_prompt_tokens,
                use_tqdm,
                lora_request,
                prompt_adapter_request)

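A sketch of `score()` with a cross-encoder reranker (model name illustrative); a `1 -> N` call replicates the single query against every candidate text, as described in the docstring above.

# Illustrative example of LLM.score(); model name is a placeholder.
llm = LLM(model="BAAI/bge-reranker-v2-m3", task="score")
scores = llm.score(
    "What is the capital of France?",
    ["Paris is the capital of France.", "The sky is blue."],
)
print([s.outputs.score for s in scores])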
    def start_profile(self) -> None:
        self.llm_engine.start_profile()

    def stop_profile(self) -> None:
        self.llm_engine.stop_profile()

    def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
        return self.llm_engine.reset_prefix_cache(device)

    def sleep(self, level: int = 1):
        """
        Put the engine to sleep. The engine should not process any requests.
        The caller should guarantee that no requests are being processed
        during the sleep period, before `wake_up` is called.

        Args:
            level: The sleep level. Level 1 sleep will offload the model
                weights and discard the kv cache. The content of kv cache
                is forgotten. Level 1 sleep is good for sleeping and waking
                up the engine to run the same model again. The model weights
                are backed up in CPU memory. Please make sure there's enough
                CPU memory to store the model weights. Level 2 sleep will
                discard both the model weights and the kv cache. The content
                of both the model weights and kv cache is forgotten. Level 2
                sleep is good for sleeping and waking up the engine to run a
                different model or update the model, where previous model
                weights are not needed. It reduces CPU memory pressure.
        """
        self.reset_prefix_cache()
        self.llm_engine.sleep(level=level)

    def wake_up(self, tags: Optional[list[str]] = None):
        """
        Wake up the engine from sleep mode. See the [sleep][] method
        for more details.

        Args:
            tags: An optional list of tags to reallocate the engine memory
                for specific memory allocations. Values must be in
                `("weights", "kv_cache")`. If None, all memory is reallocated.
                wake_up should be called with all tags (or None) before the
                engine is used again.
        """
        self.llm_engine.wake_up(tags)

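A sketch of the sleep/wake cycle; sleep mode is assumed to require `enable_sleep_mode=True` when the engine is constructed, and the model name is a placeholder.

# Illustrative example of LLM.sleep() / LLM.wake_up().
llm = LLM(model="facebook/opt-125m", enable_sleep_mode=True)
llm.generate(["warm up"], SamplingParams(max_tokens=8))
llm.sleep(level=1)   # offload weights to CPU RAM, drop the KV cache
llm.wake_up()        # reallocate everything before serving requests again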
    def get_metrics(self) -> list["Metric"]:
        """Return a snapshot of aggregated metrics from Prometheus.

        Returns:
            A list of ``Metric`` instances capturing the current state
            of all aggregated metrics from Prometheus.

        Note:
            This method is only available with the V1 LLM engine.
        """
        from vllm.v1.engine.llm_engine import LLMEngine as V1LLMEngine
        assert isinstance(self.llm_engine, V1LLMEngine)
        return self.llm_engine.get_metrics()

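A sketch of reading the aggregated metrics; this requires the V1 engine, and the `name`/`value` attributes are assumptions about the `Metric` reader classes rather than guaranteed fields.

# Illustrative example of LLM.get_metrics() (V1 engine only).
for metric in llm.get_metrics():
    print(metric.name, getattr(metric, "value", None))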
    # LEGACY
    def _convert_v1_inputs(
        self,
        prompts: Optional[Union[str, list[str]]],
        prompt_token_ids: Optional[Union[list[int], list[list[int]]]],
    ):
        # skip_tokenizer_init is now checked in engine

        if prompts is None and prompt_token_ids is None:
            raise ValueError(
                "Either prompts or prompt_token_ids must be provided.")
        if prompts is not None and prompt_token_ids is not None \
                and len(prompts) != len(prompt_token_ids):
            raise ValueError(
                "The lengths of prompts and prompt_token_ids must be the same."
            )

        if prompts is not None:
            prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
        if prompt_token_ids is not None:
            prompt_token_ids = [
                p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
            ]
        if prompts is not None:
            num_requests = len(prompts)
        elif prompt_token_ids is not None:
            num_requests = len(prompt_token_ids)
        parsed_prompts: list[PromptType] = []
        for i in range(num_requests):
            item: PromptType

            if prompts is not None:
                item = TextPrompt(prompt=prompts[i])
            elif prompt_token_ids is not None:
                item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
            else:
                raise AssertionError

            parsed_prompts.append(item)

        return parsed_prompts

    def _validate_and_add_requests(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
                      Sequence[PoolingParams]],
        *,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
        lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
        prompt_adapter_request: Optional[PromptAdapterRequest],
        tokenization_kwargs: Optional[dict[str, Any]] = None,
        guided_options: Optional[GuidedDecodingRequest] = None,
        priority: Optional[list[int]] = None,
    ) -> None:
        if guided_options is not None:
            warnings.warn(
                "guided_options_request is deprecated, use "
                "SamplingParams.guided_decoding instead",
                DeprecationWarning,
                stacklevel=2,
            )

        if isinstance(prompts, (str, dict)):
            # Convert a single prompt to a list.
            prompts = [prompts]

        num_requests = len(prompts)
        if isinstance(params, Sequence) and len(params) != num_requests:
            raise ValueError("The lengths of prompts and params "
                             "must be the same.")
        if isinstance(lora_request,
                      Sequence) and len(lora_request) != num_requests:
            raise ValueError("The lengths of prompts and lora_request "
                             "must be the same.")

        for sp in params if isinstance(params, Sequence) else (params, ):
            if isinstance(sp, SamplingParams):
                self._add_guided_params(sp, guided_options)

                # We only care about the final output
                sp.output_kind = RequestOutputKind.FINAL_ONLY

        # Add requests to the engine.
        it = prompts
        if use_tqdm:
            tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
            it = tqdm_func(it, desc="Adding requests")

        for i, prompt in enumerate(it):
            self._add_request(
                prompt,
                params[i] if isinstance(params, Sequence) else params,
                tokenization_kwargs=tokenization_kwargs,
                lora_request=lora_request[i] if isinstance(
                    lora_request, Sequence) else lora_request,
                prompt_adapter_request=prompt_adapter_request,
                priority=priority[i] if priority else 0,
            )

    def _add_request(
        self,
        prompt: PromptType,
        params: Union[SamplingParams, PoolingParams],
        tokenization_kwargs: Optional[dict[str, Any]] = None,
        lora_request: Optional[LoRARequest] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        priority: int = 0,
    ) -> None:
        request_id = str(next(self.request_counter))
        self.llm_engine.add_request(
            request_id,
            prompt,
            params,
            lora_request=lora_request,
            tokenization_kwargs=tokenization_kwargs,
            prompt_adapter_request=prompt_adapter_request,
            priority=priority,
        )

    def _add_guided_params(
            self,
            params: SamplingParams,
            guided_options: Optional[GuidedDecodingRequest] = None):
        if guided_options is None:
            return params

        if params.guided_decoding is not None:
            raise ValueError("Cannot set both guided_options_request and "
                             "params.guided_decoding.")

        params.guided_decoding = GuidedDecodingParams(
            json=guided_options.guided_json,
            regex=guided_options.guided_regex,
            choice=guided_options.guided_choice,
            grammar=guided_options.guided_grammar,
            json_object=guided_options.guided_json_object,
            backend=guided_options.guided_decoding_backend,
            whitespace_pattern=guided_options.guided_whitespace_pattern,
            structural_tag=guided_options.structural_tag,
        )
        return params

    def _run_engine(
        self,
        *,
        use_tqdm: Union[bool, Callable[..., tqdm]] = True
    ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
        # Initialize tqdm.
        if use_tqdm:
            num_requests = self.llm_engine.get_num_unfinished_requests()
            tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
            pbar = tqdm_func(
                total=num_requests,
                desc="Processed prompts",
                dynamic_ncols=True,
                postfix=(f"est. speed input: {0:.2f} toks/s, "
                         f"output: {0:.2f} toks/s"),
            )

        # Run the engine.
        outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
        total_in_toks = 0
        total_out_toks = 0
        while self.llm_engine.has_unfinished_requests():
            step_outputs = self.llm_engine.step()
            for output in step_outputs:
                if output.finished:
                    outputs.append(output)
                    if use_tqdm:
                        if isinstance(output, RequestOutput):
                            # Calculate tokens only for RequestOutput
                            n = len(output.outputs)
                            assert output.prompt_token_ids is not None
                            total_in_toks += len(output.prompt_token_ids) * n
                            in_spd = total_in_toks / pbar.format_dict["elapsed"]
                            total_out_toks += sum(
                                len(stp.token_ids) for stp in output.outputs)
                            out_spd = (total_out_toks /
                                       pbar.format_dict["elapsed"])
                            pbar.postfix = (
                                f"est. speed input: {in_spd:.2f} toks/s, "
                                f"output: {out_spd:.2f} toks/s")
                            pbar.update(n)
                        else:
                            pbar.update(1)
                        if pbar.n == num_requests:
                            pbar.refresh()

        if use_tqdm:
            pbar.close()
        # Sort the outputs by request ID.
        # This is necessary because some requests may be finished earlier
        # than their previous requests.
        return sorted(outputs, key=lambda x: int(x.request_id))