vllm-cpu-avx512bf16 0.9.0.post2__cp310-cp310-manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
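Since wheels are ordinary ZIP archives, a per-file listing like the one below can be approximated locally with Python's standard zipfile module. This is only an illustrative sketch, not part of the package: the wheel filename used here is an assumption (the canonical PEP 427 form of the name above), and the counts are a simple newline tally, so they may differ slightly from the registry's own diff numbers.

```python
# Illustrative sketch: reproduce a "+N -0" per-file listing from a downloaded wheel.
# The filename below is assumed; point it at the wheel you actually downloaded.
import zipfile

WHEEL = "vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        if info.is_dir():
            continue
        data = wheel.read(info.filename)
        try:
            # Text files: count lines, mirroring the "+N" column of the diff.
            lines = data.decode("utf-8").count("\n")
        except UnicodeDecodeError:
            # Binary artifacts (e.g. vllm/_C.abi3.so) show up as "+0 -0".
            lines = 0
        print(f"- {info.filename} +{lines} -0")
```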
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +170 -0
- vllm/_custom_ops.py +1742 -0
- vllm/_ipex_ops.py +243 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +15 -0
- vllm/adapter_commons/models.py +105 -0
- vllm/adapter_commons/request.py +25 -0
- vllm/adapter_commons/utils.py +92 -0
- vllm/adapter_commons/worker_manager.py +38 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +44 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +33 -0
- vllm/assets/video.py +114 -0
- vllm/attention/__init__.py +19 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +306 -0
- vllm/attention/backends/blocksparse_attn.py +457 -0
- vllm/attention/backends/cpu_mla.py +305 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1494 -0
- vllm/attention/backends/flash_attn.py +999 -0
- vllm/attention/backends/flashinfer.py +1100 -0
- vllm/attention/backends/flashmla.py +242 -0
- vllm/attention/backends/hpu_attn.py +309 -0
- vllm/attention/backends/ipex_attn.py +394 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1381 -0
- vllm/attention/backends/pallas.py +347 -0
- vllm/attention/backends/placeholder_attn.py +399 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +970 -0
- vllm/attention/backends/torch_sdpa.py +691 -0
- vllm/attention/backends/triton_mla.py +113 -0
- vllm/attention/backends/utils.py +609 -0
- vllm/attention/backends/xformers.py +798 -0
- vllm/attention/layer.py +452 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
- vllm/attention/ops/blocksparse_attention/interface.py +238 -0
- vllm/attention/ops/blocksparse_attention/utils.py +245 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +367 -0
- vllm/attention/ops/flashmla.py +115 -0
- vllm/attention/ops/hpu_paged_attn.py +87 -0
- vllm/attention/ops/ipex_attn.py +194 -0
- vllm/attention/ops/merge_attn_states.py +42 -0
- vllm/attention/ops/nki_flash_attn.py +905 -0
- vllm/attention/ops/paged_attn.py +255 -0
- vllm/attention/ops/prefix_prefill.py +901 -0
- vllm/attention/ops/rocm_aiter_mla.py +99 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
- vllm/attention/ops/triton_decode_attention.py +673 -0
- vllm/attention/ops/triton_flash_attention.py +1374 -0
- vllm/attention/ops/triton_merge_attn_states.py +96 -0
- vllm/attention/ops/triton_unified_attention.py +337 -0
- vllm/attention/selector.py +186 -0
- vllm/attention/utils/fa_utils.py +54 -0
- vllm/beam_search.py +82 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +921 -0
- vllm/benchmarks/endpoint_request_func.py +160 -0
- vllm/benchmarks/latency.py +184 -0
- vllm/benchmarks/serve.py +925 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +69 -0
- vllm/collect_env.py +818 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +88 -0
- vllm/compilation/backends.py +560 -0
- vllm/compilation/base_piecewise_backend.py +71 -0
- vllm/compilation/collective_fusion.py +126 -0
- vllm/compilation/compiler_interface.py +533 -0
- vllm/compilation/counter.py +33 -0
- vllm/compilation/cuda_piecewise_backend.py +213 -0
- vllm/compilation/decorators.py +249 -0
- vllm/compilation/fix_functionalization.py +190 -0
- vllm/compilation/fusion.py +617 -0
- vllm/compilation/fx_utils.py +61 -0
- vllm/compilation/inductor_pass.py +114 -0
- vllm/compilation/monitor.py +38 -0
- vllm/compilation/multi_output_match.py +108 -0
- vllm/compilation/noop_elimination.py +136 -0
- vllm/compilation/pass_manager.py +77 -0
- vllm/compilation/sequence_parallelism.py +267 -0
- vllm/compilation/torch25_custom_graph_pass.py +41 -0
- vllm/compilation/vllm_inductor_pass.py +66 -0
- vllm/compilation/wrapper.py +129 -0
- vllm/config.py +4600 -0
- vllm/connections.py +173 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +398 -0
- vllm/core/block/common.py +370 -0
- vllm/core/block/cpu_gpu_block_allocator.py +440 -0
- vllm/core/block/interfaces.py +318 -0
- vllm/core/block/naive_block.py +465 -0
- vllm/core/block/prefix_caching_block.py +1134 -0
- vllm/core/block/utils.py +27 -0
- vllm/core/block_manager.py +520 -0
- vllm/core/evictor.py +156 -0
- vllm/core/interfaces.py +134 -0
- vllm/core/placeholder_block_space_manager.py +99 -0
- vllm/core/scheduler.py +2092 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +280 -0
- vllm/distributed/__init__.py +5 -0
- vllm/distributed/communication_op.py +40 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +126 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +144 -0
- vllm/distributed/device_communicators/cuda_communicator.py +167 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +303 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +258 -0
- vllm/distributed/device_communicators/hpu_communicator.py +45 -0
- vllm/distributed/device_communicators/neuron_communicator.py +19 -0
- vllm/distributed/device_communicators/pynccl.py +217 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
- vllm/distributed/device_communicators/shm_broadcast.py +541 -0
- vllm/distributed/device_communicators/tpu_communicator.py +102 -0
- vllm/distributed/device_communicators/xpu_communicator.py +54 -0
- vllm/distributed/kv_events.py +296 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +11 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +126 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +202 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +91 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +5 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +259 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +133 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +851 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
- vllm/distributed/parallel_state.py +1294 -0
- vllm/distributed/utils.py +520 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1649 -0
- vllm/engine/async_llm_engine.py +1274 -0
- vllm/engine/async_timeout.py +191 -0
- vllm/engine/llm_engine.py +2153 -0
- vllm/engine/metrics.py +717 -0
- vllm/engine/metrics_types.py +96 -0
- vllm/engine/multiprocessing/__init__.py +188 -0
- vllm/engine/multiprocessing/client.py +755 -0
- vllm/engine/multiprocessing/engine.py +459 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +74 -0
- vllm/engine/output_processor/multi_step.py +215 -0
- vllm/engine/output_processor/single_step.py +144 -0
- vllm/engine/output_processor/stop_checker.py +130 -0
- vllm/engine/output_processor/util.py +27 -0
- vllm/engine/protocol.py +310 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +177 -0
- vllm/entrypoints/chat_utils.py +1298 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +38 -0
- vllm/entrypoints/cli/benchmark/latency.py +29 -0
- vllm/entrypoints/cli/benchmark/main.py +53 -0
- vllm/entrypoints/cli/benchmark/serve.py +29 -0
- vllm/entrypoints/cli/benchmark/throughput.py +29 -0
- vllm/entrypoints/cli/collect_env.py +34 -0
- vllm/entrypoints/cli/main.py +62 -0
- vllm/entrypoints/cli/openai.py +204 -0
- vllm/entrypoints/cli/serve.py +141 -0
- vllm/entrypoints/cli/types.py +24 -0
- vllm/entrypoints/launcher.py +146 -0
- vllm/entrypoints/llm.py +1503 -0
- vllm/entrypoints/logger.py +49 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1376 -0
- vllm/entrypoints/openai/cli_args.py +306 -0
- vllm/entrypoints/openai/logits_processors.py +89 -0
- vllm/entrypoints/openai/protocol.py +1890 -0
- vllm/entrypoints/openai/run_batch.py +439 -0
- vllm/entrypoints/openai/serving_chat.py +1192 -0
- vllm/entrypoints/openai/serving_classification.py +159 -0
- vllm/entrypoints/openai/serving_completion.py +590 -0
- vllm/entrypoints/openai/serving_embedding.py +200 -0
- vllm/entrypoints/openai/serving_engine.py +985 -0
- vllm/entrypoints/openai/serving_models.py +314 -0
- vllm/entrypoints/openai/serving_pooling.py +231 -0
- vllm/entrypoints/openai/serving_score.py +432 -0
- vllm/entrypoints/openai/serving_tokenization.py +151 -0
- vllm/entrypoints/openai/serving_transcription.py +421 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +22 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +258 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +236 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +215 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +307 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +302 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +266 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +111 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +296 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
- vllm/entrypoints/score_utils.py +49 -0
- vllm/entrypoints/ssl.py +74 -0
- vllm/entrypoints/utils.py +219 -0
- vllm/env_override.py +34 -0
- vllm/envs.py +896 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +400 -0
- vllm/executor/mp_distributed_executor.py +243 -0
- vllm/executor/msgspec_utils.py +29 -0
- vllm/executor/multiproc_worker_utils.py +312 -0
- vllm/executor/ray_distributed_executor.py +700 -0
- vllm/executor/ray_utils.py +398 -0
- vllm/executor/uniproc_executor.py +138 -0
- vllm/forward_context.py +147 -0
- vllm/inputs/__init__.py +40 -0
- vllm/inputs/data.py +330 -0
- vllm/inputs/parse.py +150 -0
- vllm/inputs/preprocess.py +908 -0
- vllm/inputs/registry.py +214 -0
- vllm/jsontree.py +79 -0
- vllm/logger.py +211 -0
- vllm/logging_utils/__init__.py +7 -0
- vllm/logging_utils/dump_input.py +84 -0
- vllm/logging_utils/formatter.py +17 -0
- vllm/logits_process.py +118 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +354 -0
- vllm/lora/layers.py +1284 -0
- vllm/lora/lora.py +198 -0
- vllm/lora/models.py +817 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +15 -0
- vllm/lora/ops/torch_ops/lora_ops.py +115 -0
- vllm/lora/ops/triton_ops/__init__.py +11 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +242 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
- vllm/lora/ops/triton_ops/utils.py +119 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +106 -0
- vllm/lora/ops/xla_ops/pallas.py +133 -0
- vllm/lora/peft_helper.py +135 -0
- vllm/lora/punica_wrapper/__init__.py +9 -0
- vllm/lora/punica_wrapper/punica_base.py +484 -0
- vllm/lora/punica_wrapper/punica_cpu.py +348 -0
- vllm/lora/punica_wrapper/punica_gpu.py +289 -0
- vllm/lora/punica_wrapper/punica_hpu.py +144 -0
- vllm/lora/punica_wrapper/punica_selector.py +19 -0
- vllm/lora/punica_wrapper/punica_tpu.py +325 -0
- vllm/lora/punica_wrapper/utils.py +163 -0
- vllm/lora/request.py +98 -0
- vllm/lora/resolver.py +84 -0
- vllm/lora/utils.py +239 -0
- vllm/lora/worker_manager.py +253 -0
- vllm/model_executor/__init__.py +15 -0
- vllm/model_executor/custom_op.py +151 -0
- vllm/model_executor/guided_decoding/__init__.py +180 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +62 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +103 -0
- vllm/model_executor/guided_decoding/guided_fields.py +42 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +283 -0
- vllm/model_executor/guided_decoding/utils.py +241 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +368 -0
- vllm/model_executor/layers/fused_moe/__init__.py +53 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +382 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +227 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +755 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +231 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1722 -0
- vllm/model_executor/layers/fused_moe/layer.py +1366 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +364 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +242 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +188 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +146 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +60 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +372 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +112 -0
- vllm/model_executor/layers/fused_moe/utils.py +97 -0
- vllm/model_executor/layers/layernorm.py +287 -0
- vllm/model_executor/layers/lightning_attn.py +651 -0
- vllm/model_executor/layers/linear.py +1523 -0
- vllm/model_executor/layers/logits_processor.py +196 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +124 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +615 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +413 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
- vllm/model_executor/layers/pooler.py +343 -0
- vllm/model_executor/layers/quantization/__init__.py +156 -0
- vllm/model_executor/layers/quantization/aqlm.py +375 -0
- vllm/model_executor/layers/quantization/auto_round.py +308 -0
- vllm/model_executor/layers/quantization/awq.py +185 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
- vllm/model_executor/layers/quantization/awq_triton.py +319 -0
- vllm/model_executor/layers/quantization/base_config.py +150 -0
- vllm/model_executor/layers/quantization/bitblas.py +460 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +397 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +644 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1252 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +21 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +92 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +120 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +214 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +194 -0
- vllm/model_executor/layers/quantization/experts_int8.py +195 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +171 -0
- vllm/model_executor/layers/quantization/fp8.py +876 -0
- vllm/model_executor/layers/quantization/gguf.py +564 -0
- vllm/model_executor/layers/quantization/gptq.py +277 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +444 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +647 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +296 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +331 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +249 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +130 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +138 -0
- vllm/model_executor/layers/quantization/marlin.py +260 -0
- vllm/model_executor/layers/quantization/modelopt.py +734 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +448 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +68 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +126 -0
- vllm/model_executor/layers/quantization/qqq.py +274 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +440 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +8 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +125 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +145 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
- vllm/model_executor/layers/quantization/quark/utils.py +104 -0
- vllm/model_executor/layers/quantization/schema.py +85 -0
- vllm/model_executor/layers/quantization/torchao.py +143 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +120 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +207 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +611 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +484 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +475 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +277 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +324 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +463 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +125 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +44 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +61 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +572 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/resampler.py +269 -0
- vllm/model_executor/layers/rotary_embedding.py +1861 -0
- vllm/model_executor/layers/sampler.py +1203 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +165 -0
- vllm/model_executor/layers/utils.py +99 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +486 -0
- vllm/model_executor/model_loader/__init__.py +75 -0
- vllm/model_executor/model_loader/base_loader.py +24 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +582 -0
- vllm/model_executor/model_loader/default_loader.py +295 -0
- vllm/model_executor/model_loader/dummy_loader.py +37 -0
- vllm/model_executor/model_loader/gguf_loader.py +113 -0
- vllm/model_executor/model_loader/neuron.py +475 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +622 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +120 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +211 -0
- vllm/model_executor/model_loader/tensorizer.py +632 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +122 -0
- vllm/model_executor/model_loader/utils.py +301 -0
- vllm/model_executor/model_loader/weight_utils.py +781 -0
- vllm/model_executor/models/__init__.py +27 -0
- vllm/model_executor/models/adapters.py +247 -0
- vllm/model_executor/models/aimv2.py +199 -0
- vllm/model_executor/models/arctic.py +558 -0
- vllm/model_executor/models/aria.py +656 -0
- vllm/model_executor/models/aya_vision.py +461 -0
- vllm/model_executor/models/baichuan.py +473 -0
- vllm/model_executor/models/bamba.py +542 -0
- vllm/model_executor/models/bart.py +937 -0
- vllm/model_executor/models/bert.py +517 -0
- vllm/model_executor/models/bert_with_rope.py +714 -0
- vllm/model_executor/models/blip.py +338 -0
- vllm/model_executor/models/blip2.py +717 -0
- vllm/model_executor/models/bloom.py +372 -0
- vllm/model_executor/models/chameleon.py +1135 -0
- vllm/model_executor/models/chatglm.py +477 -0
- vllm/model_executor/models/clip.py +411 -0
- vllm/model_executor/models/commandr.py +471 -0
- vllm/model_executor/models/constant_size_cache.py +136 -0
- vllm/model_executor/models/dbrx.py +471 -0
- vllm/model_executor/models/deepseek.py +485 -0
- vllm/model_executor/models/deepseek_mtp.py +268 -0
- vllm/model_executor/models/deepseek_v2.py +842 -0
- vllm/model_executor/models/deepseek_vl2.py +647 -0
- vllm/model_executor/models/eagle.py +259 -0
- vllm/model_executor/models/exaone.py +550 -0
- vllm/model_executor/models/fairseq2_llama.py +153 -0
- vllm/model_executor/models/falcon.py +509 -0
- vllm/model_executor/models/falcon_h1.py +684 -0
- vllm/model_executor/models/florence2.py +1102 -0
- vllm/model_executor/models/fuyu.py +388 -0
- vllm/model_executor/models/gemma.py +424 -0
- vllm/model_executor/models/gemma2.py +424 -0
- vllm/model_executor/models/gemma3.py +532 -0
- vllm/model_executor/models/gemma3_mm.py +708 -0
- vllm/model_executor/models/glm.py +22 -0
- vllm/model_executor/models/glm4.py +304 -0
- vllm/model_executor/models/glm4v.py +647 -0
- vllm/model_executor/models/gpt2.py +327 -0
- vllm/model_executor/models/gpt_bigcode.py +334 -0
- vllm/model_executor/models/gpt_j.py +338 -0
- vllm/model_executor/models/gpt_neox.py +331 -0
- vllm/model_executor/models/granite.py +492 -0
- vllm/model_executor/models/granite_speech.py +778 -0
- vllm/model_executor/models/granitemoe.py +436 -0
- vllm/model_executor/models/granitemoehybrid.py +585 -0
- vllm/model_executor/models/granitemoeshared.py +340 -0
- vllm/model_executor/models/gritlm.py +223 -0
- vllm/model_executor/models/grok1.py +545 -0
- vllm/model_executor/models/h2ovl.py +545 -0
- vllm/model_executor/models/idefics2_vision_model.py +388 -0
- vllm/model_executor/models/idefics3.py +767 -0
- vllm/model_executor/models/interfaces.py +571 -0
- vllm/model_executor/models/interfaces_base.py +163 -0
- vllm/model_executor/models/intern_vit.py +475 -0
- vllm/model_executor/models/internlm2.py +454 -0
- vllm/model_executor/models/internlm2_ve.py +146 -0
- vllm/model_executor/models/internvl.py +1405 -0
- vllm/model_executor/models/jais.py +372 -0
- vllm/model_executor/models/jamba.py +591 -0
- vllm/model_executor/models/kimi_vl.py +576 -0
- vllm/model_executor/models/llama.py +643 -0
- vllm/model_executor/models/llama4.py +531 -0
- vllm/model_executor/models/llama_eagle.py +166 -0
- vllm/model_executor/models/llama_eagle3.py +257 -0
- vllm/model_executor/models/llava.py +865 -0
- vllm/model_executor/models/llava_next.py +585 -0
- vllm/model_executor/models/llava_next_video.py +470 -0
- vllm/model_executor/models/llava_onevision.py +955 -0
- vllm/model_executor/models/mamba.py +272 -0
- vllm/model_executor/models/mamba2.py +302 -0
- vllm/model_executor/models/mamba_cache.py +75 -0
- vllm/model_executor/models/medusa.py +218 -0
- vllm/model_executor/models/mimo.py +191 -0
- vllm/model_executor/models/mimo_mtp.py +284 -0
- vllm/model_executor/models/minicpm.py +590 -0
- vllm/model_executor/models/minicpm3.py +229 -0
- vllm/model_executor/models/minicpmo.py +758 -0
- vllm/model_executor/models/minicpmv.py +1286 -0
- vllm/model_executor/models/minimax_cache.py +35 -0
- vllm/model_executor/models/minimax_text_01.py +1303 -0
- vllm/model_executor/models/minimax_vl_01.py +363 -0
- vllm/model_executor/models/mistral3.py +603 -0
- vllm/model_executor/models/mixtral.py +487 -0
- vllm/model_executor/models/mixtral_quant.py +452 -0
- vllm/model_executor/models/mllama.py +1623 -0
- vllm/model_executor/models/mllama4.py +838 -0
- vllm/model_executor/models/mlp_speculator.py +205 -0
- vllm/model_executor/models/modernbert.py +329 -0
- vllm/model_executor/models/module_mapping.py +71 -0
- vllm/model_executor/models/molmo.py +1567 -0
- vllm/model_executor/models/moonvit.py +629 -0
- vllm/model_executor/models/mpt.py +330 -0
- vllm/model_executor/models/nemotron.py +507 -0
- vllm/model_executor/models/nemotron_nas.py +483 -0
- vllm/model_executor/models/nvlm_d.py +215 -0
- vllm/model_executor/models/olmo.py +388 -0
- vllm/model_executor/models/olmo2.py +413 -0
- vllm/model_executor/models/olmoe.py +446 -0
- vllm/model_executor/models/opt.py +411 -0
- vllm/model_executor/models/orion.py +348 -0
- vllm/model_executor/models/ovis.py +554 -0
- vllm/model_executor/models/paligemma.py +397 -0
- vllm/model_executor/models/persimmon.py +343 -0
- vllm/model_executor/models/phi.py +355 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3_small.py +464 -0
- vllm/model_executor/models/phi3v.py +722 -0
- vllm/model_executor/models/phi4mm.py +1245 -0
- vllm/model_executor/models/phi4mm_audio.py +1232 -0
- vllm/model_executor/models/phi4mm_utils.py +1883 -0
- vllm/model_executor/models/phimoe.py +664 -0
- vllm/model_executor/models/pixtral.py +1315 -0
- vllm/model_executor/models/plamo2.py +737 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
- vllm/model_executor/models/qwen.py +361 -0
- vllm/model_executor/models/qwen2.py +567 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +903 -0
- vllm/model_executor/models/qwen2_5_vl.py +1171 -0
- vllm/model_executor/models/qwen2_audio.py +409 -0
- vllm/model_executor/models/qwen2_moe.py +539 -0
- vllm/model_executor/models/qwen2_rm.py +131 -0
- vllm/model_executor/models/qwen2_vl.py +1410 -0
- vllm/model_executor/models/qwen3.py +320 -0
- vllm/model_executor/models/qwen3_moe.py +534 -0
- vllm/model_executor/models/qwen_vl.py +784 -0
- vllm/model_executor/models/registry.py +618 -0
- vllm/model_executor/models/roberta.py +273 -0
- vllm/model_executor/models/siglip.py +523 -0
- vllm/model_executor/models/skyworkr1v.py +950 -0
- vllm/model_executor/models/smolvlm.py +51 -0
- vllm/model_executor/models/solar.py +505 -0
- vllm/model_executor/models/stablelm.py +342 -0
- vllm/model_executor/models/starcoder2.py +355 -0
- vllm/model_executor/models/telechat2.py +139 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/transformers.py +507 -0
- vllm/model_executor/models/ultravox.py +655 -0
- vllm/model_executor/models/utils.py +730 -0
- vllm/model_executor/models/vision.py +146 -0
- vllm/model_executor/models/whisper.py +746 -0
- vllm/model_executor/models/zamba2.py +1008 -0
- vllm/model_executor/parameter.py +458 -0
- vllm/model_executor/pooling_metadata.py +71 -0
- vllm/model_executor/sampling_metadata.py +596 -0
- vllm/model_executor/utils.py +53 -0
- vllm/multimodal/__init__.py +32 -0
- vllm/multimodal/audio.py +105 -0
- vllm/multimodal/base.py +218 -0
- vllm/multimodal/hasher.py +117 -0
- vllm/multimodal/image.py +96 -0
- vllm/multimodal/inputs.py +872 -0
- vllm/multimodal/parse.py +460 -0
- vllm/multimodal/processing.py +1894 -0
- vllm/multimodal/profiling.py +273 -0
- vllm/multimodal/registry.py +330 -0
- vllm/multimodal/utils.py +392 -0
- vllm/multimodal/video.py +197 -0
- vllm/outputs.py +525 -0
- vllm/platforms/__init__.py +290 -0
- vllm/platforms/cpu.py +205 -0
- vllm/platforms/cuda.py +461 -0
- vllm/platforms/hpu.py +105 -0
- vllm/platforms/interface.py +492 -0
- vllm/platforms/neuron.py +152 -0
- vllm/platforms/rocm.py +388 -0
- vllm/platforms/tpu.py +215 -0
- vllm/platforms/xpu.py +155 -0
- vllm/plugins/__init__.py +86 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +49 -0
- vllm/pooling_params.py +53 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +374 -0
- vllm/profiler/utils.py +147 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +82 -0
- vllm/prompt_adapter/models.py +357 -0
- vllm/prompt_adapter/request.py +36 -0
- vllm/prompt_adapter/utils.py +97 -0
- vllm/prompt_adapter/worker_manager.py +178 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +14 -0
- vllm/reasoning/abs_reasoning_parsers.py +191 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
- vllm/reasoning/granite_reasoning_parser.py +362 -0
- vllm/reasoning/qwen3_reasoning_parser.py +150 -0
- vllm/sampling_params.py +590 -0
- vllm/scalar_type.py +346 -0
- vllm/scripts.py +14 -0
- vllm/sequence.py +1567 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +505 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +98 -0
- vllm/spec_decode/medusa_worker.py +137 -0
- vllm/spec_decode/metrics.py +212 -0
- vllm/spec_decode/mlp_speculator_worker.py +93 -0
- vllm/spec_decode/mqa_scorer.py +159 -0
- vllm/spec_decode/multi_step_worker.py +422 -0
- vllm/spec_decode/ngram_worker.py +195 -0
- vllm/spec_decode/proposer_worker_base.py +58 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +195 -0
- vllm/spec_decode/spec_decode_worker.py +1325 -0
- vllm/spec_decode/target_model_runner.py +44 -0
- vllm/spec_decode/top1_proposer.py +274 -0
- vllm/spec_decode/util.py +276 -0
- vllm/test_utils.py +129 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6139 -0
- vllm/tracing.py +130 -0
- vllm/transformers_utils/__init__.py +23 -0
- vllm/transformers_utils/chat_templates/__init__.py +4 -0
- vllm/transformers_utils/chat_templates/registry.py +59 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +835 -0
- vllm/transformers_utils/configs/__init__.py +58 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +71 -0
- vllm/transformers_utils/configs/cohere2.py +194 -0
- vllm/transformers_utils/configs/dbrx.py +279 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +215 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/exaone.py +189 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/h2ovl.py +15 -0
- vllm/transformers_utils/configs/internvl.py +53 -0
- vllm/transformers_utils/configs/jais.py +237 -0
- vllm/transformers_utils/configs/kimi_vl.py +36 -0
- vllm/transformers_utils/configs/medusa.py +62 -0
- vllm/transformers_utils/configs/minimax_text_01.py +69 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +70 -0
- vllm/transformers_utils/configs/mllama.py +30 -0
- vllm/transformers_utils/configs/mlp_speculator.py +67 -0
- vllm/transformers_utils/configs/moonvit.py +32 -0
- vllm/transformers_utils/configs/mpt.py +179 -0
- vllm/transformers_utils/configs/nemotron.py +204 -0
- vllm/transformers_utils/configs/nvlm_d.py +14 -0
- vllm/transformers_utils/configs/ovis.py +183 -0
- vllm/transformers_utils/configs/skyworkr1v.py +53 -0
- vllm/transformers_utils/configs/solar.py +246 -0
- vllm/transformers_utils/configs/telechat2.py +63 -0
- vllm/transformers_utils/configs/ultravox.py +107 -0
- vllm/transformers_utils/detokenizer.py +167 -0
- vllm/transformers_utils/detokenizer_utils.py +188 -0
- vllm/transformers_utils/processor.py +220 -0
- vllm/transformers_utils/processors/__init__.py +7 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
- vllm/transformers_utils/processors/ovis.py +419 -0
- vllm/transformers_utils/s3_utils.py +161 -0
- vllm/transformers_utils/tokenizer.py +301 -0
- vllm/transformers_utils/tokenizer_base.py +148 -0
- vllm/transformers_utils/tokenizer_group.py +119 -0
- vllm/transformers_utils/tokenizers/__init__.py +9 -0
- vllm/transformers_utils/tokenizers/mistral.py +490 -0
- vllm/transformers_utils/utils.py +98 -0
- vllm/triton_utils/__init__.py +13 -0
- vllm/triton_utils/importing.py +49 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +255 -0
- vllm/utils.py +2844 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/flash_attn.py +833 -0
- vllm/v1/attention/backends/flashinfer.py +639 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +926 -0
- vllm/v1/attention/backends/mla/flashmla.py +150 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +221 -0
- vllm/v1/attention/backends/mla/triton_mla.py +118 -0
- vllm/v1/attention/backends/pallas.py +235 -0
- vllm/v1/attention/backends/triton_attn.py +279 -0
- vllm/v1/attention/backends/utils.py +18 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +328 -0
- vllm/v1/core/encoder_cache_manager.py +149 -0
- vllm/v1/core/kv_cache_manager.py +372 -0
- vllm/v1/core/kv_cache_utils.py +748 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +143 -0
- vllm/v1/core/sched/output.py +153 -0
- vllm/v1/core/sched/scheduler.py +1015 -0
- vllm/v1/core/sched/utils.py +22 -0
- vllm/v1/core/single_type_kv_cache_manager.py +358 -0
- vllm/v1/engine/__init__.py +171 -0
- vllm/v1/engine/async_llm.py +546 -0
- vllm/v1/engine/core.py +801 -0
- vllm/v1/engine/core_client.py +1020 -0
- vllm/v1/engine/detokenizer.py +260 -0
- vllm/v1/engine/exceptions.py +16 -0
- vllm/v1/engine/llm_engine.py +316 -0
- vllm/v1/engine/logprobs.py +198 -0
- vllm/v1/engine/mm_input_cache.py +90 -0
- vllm/v1/engine/output_processor.py +427 -0
- vllm/v1/engine/parallel_sampling.py +132 -0
- vllm/v1/engine/processor.py +398 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +112 -0
- vllm/v1/executor/multiproc_executor.py +532 -0
- vllm/v1/executor/ray_distributed_executor.py +61 -0
- vllm/v1/kv_cache_interface.py +208 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +511 -0
- vllm/v1/metrics/ray_wrappers.py +120 -0
- vllm/v1/metrics/reader.py +245 -0
- vllm/v1/metrics/stats.py +238 -0
- vllm/v1/outputs.py +115 -0
- vllm/v1/request.py +191 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +38 -0
- vllm/v1/sample/ops/penalties.py +58 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
- vllm/v1/sample/rejection_sampler.py +630 -0
- vllm/v1/sample/sampler.py +270 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +123 -0
- vllm/v1/sample/tpu/sampler.py +144 -0
- vllm/v1/serial_utils.py +313 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +424 -0
- vllm/v1/spec_decode/medusa.py +61 -0
- vllm/v1/spec_decode/metadata.py +61 -0
- vllm/v1/spec_decode/metrics.py +177 -0
- vllm/v1/spec_decode/ngram_proposer.py +131 -0
- vllm/v1/spec_decode/utils.py +45 -0
- vllm/v1/structured_output/__init__.py +215 -0
- vllm/v1/structured_output/backend_guidance.py +244 -0
- vllm/v1/structured_output/backend_types.py +133 -0
- vllm/v1/structured_output/backend_xgrammar.py +317 -0
- vllm/v1/structured_output/request.py +85 -0
- vllm/v1/structured_output/utils.py +174 -0
- vllm/v1/utils.py +294 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +139 -0
- vllm/v1/worker/gpu_input_batch.py +680 -0
- vllm/v1/worker/gpu_model_runner.py +2084 -0
- vllm/v1/worker/gpu_worker.py +373 -0
- vllm/v1/worker/lora_model_runner_mixin.py +145 -0
- vllm/v1/worker/tpu_model_runner.py +1510 -0
- vllm/v1/worker/tpu_worker.py +276 -0
- vllm/v1/worker/utils.py +74 -0
- vllm/v1/worker/worker_base.py +64 -0
- vllm/version.py +40 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +144 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +400 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2319 -0
- vllm/worker/hpu_worker.py +483 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +281 -0
- vllm/worker/multi_step_hpu_worker.py +122 -0
- vllm/worker/multi_step_model_runner.py +910 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +107 -0
- vllm/worker/multi_step_worker.py +196 -0
- vllm/worker/neuron_model_runner.py +418 -0
- vllm/worker/neuron_worker.py +158 -0
- vllm/worker/neuronx_distributed_model_runner.py +136 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +908 -0
- vllm/worker/tpu_worker.py +336 -0
- vllm/worker/utils.py +52 -0
- vllm/worker/worker.py +574 -0
- vllm/worker/worker_base.py +644 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +185 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/METADATA +335 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/RECORD +1175 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/WHEEL +5 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2084 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import copy
+import gc
+import time
+import weakref
+from typing import TYPE_CHECKING, Optional, Union
+
+import numpy as np
+import torch
+import torch.distributed
+import torch.nn as nn
+
+from vllm.attention import AttentionType, get_attn_backend
+from vllm.attention.backends.abstract import (AttentionBackend,
+                                              AttentionMetadataBuilder)
+from vllm.attention.layer import Attention
+from vllm.attention.utils.fa_utils import get_flash_attn_version
+from vllm.config import (CompilationLevel, VllmConfig,
+                         get_layers_from_vllm_config)
+from vllm.distributed.kv_transfer import (get_kv_transfer_group,
+                                          has_kv_transfer_group)
+from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorBase_V1
+from vllm.distributed.parallel_state import (
+    get_pp_group, get_tp_group, graph_capture,
+    prepare_communication_buffer_for_model)
+from vllm.forward_context import get_forward_context, set_forward_context
+from vllm.logger import init_logger
+from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
+from vllm.model_executor.model_loader import TensorizerLoader, get_model
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange
+from vllm.multimodal.utils import group_mm_inputs_by_modality
+from vllm.sampling_params import SamplingType
+from vllm.sequence import IntermediateTensors
+from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler,
+                        GiB_bytes, LazyLoader, async_tensor_h2d, cdiv,
+                        check_use_alibi, is_pin_memory_available)
+from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
+from vllm.v1.attention.backends.utils import CommonAttentionMetadata
+from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
+from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
+                                        KVCacheConfig, KVCacheSpec,
+                                        SlidingWindowSpec)
+from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors,
+                             ModelRunnerOutput)
+from vllm.v1.sample.metadata import SamplingMetadata
+from vllm.v1.sample.rejection_sampler import RejectionSampler
+from vllm.v1.sample.sampler import Sampler
+from vllm.v1.spec_decode.eagle import EagleProposer
+from vllm.v1.spec_decode.medusa import MedusaProposer
+from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
+from vllm.v1.spec_decode.ngram_proposer import NgramProposer
+from vllm.v1.spec_decode.utils import is_spec_decode_supported
+from vllm.v1.utils import bind_kv_cache
+from vllm.v1.worker.block_table import BlockTable
+from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
+from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
+
+from .utils import (gather_mm_placeholders, sanity_check_mm_encoder_outputs,
+                    scatter_mm_placeholders)
+
+if TYPE_CHECKING:
+    import xgrammar as xgr
+
+    from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
+    from vllm.v1.core.sched.output import SchedulerOutput
+else:
+    xgr = LazyLoader("xgr", globals(), "xgrammar")
+
+logger = init_logger(__name__)
+
+
+class GPUModelRunner(LoRAModelRunnerMixin):
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        device: torch.device,
+    ):
+        self.vllm_config = vllm_config
+        self.model_config = vllm_config.model_config
+        self.cache_config = vllm_config.cache_config
+        self.lora_config = vllm_config.lora_config
+        self.load_config = vllm_config.load_config
+        self.parallel_config = vllm_config.parallel_config
+        self.scheduler_config = vllm_config.scheduler_config
+        self.speculative_config = vllm_config.speculative_config
+        self.prompt_adapter_config = vllm_config.prompt_adapter_config
+        self.observability_config = vllm_config.observability_config
+
+        from vllm.model_executor.models.utils import set_cpu_offload_max_bytes
+        set_cpu_offload_max_bytes(
+            int(self.cache_config.cpu_offload_gb * 1024**3))
+
+        model_config = self.model_config
+        cache_config = self.cache_config
+        scheduler_config = self.scheduler_config
+        parallel_config = self.parallel_config
+        self.device = device
+        self.pin_memory = is_pin_memory_available()
+        self.dtype = self.model_config.dtype
+        if cache_config.cache_dtype == "auto":
+            self.kv_cache_dtype = self.dtype
+        else:
+            self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
+                cache_config.cache_dtype]
+
+        self.is_multimodal_model = model_config.is_multimodal_model
+        self.max_model_len = model_config.max_model_len
+        self.max_num_tokens = scheduler_config.max_num_batched_tokens
+        self.max_num_reqs = scheduler_config.max_num_seqs
+
+        # Model-related.
+        self.num_query_heads = model_config.get_num_attention_heads(
+            parallel_config)
+        self.hidden_size = model_config.get_hidden_size()
+        self.attention_chunk_size = model_config.attention_chunk_size
+
+        self.cascade_attn_enabled = not self.model_config.disable_cascade_attn
+
+        # Multi-modal data support
+        self.mm_registry = MULTIMODAL_REGISTRY
+        self.uses_mrope = model_config.uses_mrope
+
+        encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
+            model_config=model_config,
+            scheduler_config=scheduler_config,
+            mm_registry=self.mm_registry,
+        )
+        self.max_num_encoder_input_tokens = encoder_compute_budget
+        self.encoder_cache_size = encoder_cache_size
+
+        # Sampler
+        self.sampler = Sampler()
+
+        # Lazy initializations
+        # self.model: nn.Module  # Set after load_model
+        # Initialize in initialize_kv_cache
+        self.kv_caches: list[torch.Tensor] = []
+        self.attn_metadata_builders: list[AttentionMetadataBuilder] = []
+        self.attn_backends: list[type[AttentionBackend]] = []
+        # self.kv_cache_config: KVCacheConfig
+        # self.input_batch: InputBatch  # Persistent batch.
+
+        # req_id -> (input_id -> encoder_output)
+        self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
+
+        # Set up speculative decoding.
+        self.use_spec_decode = False
+        self.use_aux_hidden_state_outputs = False
+        if self.speculative_config:
+            self.use_spec_decode = True
+
+            # NOTE(Jiayi): currently we put the entire draft model on
+            # the last PP rank. This is not ideal if there are many
+            # layers in the draft model.
+            if get_pp_group().is_last_rank:
+                if self.speculative_config.method == "ngram":
+                    self.drafter = NgramProposer(self.vllm_config)
+                elif self.speculative_config.use_eagle():
+                    self.drafter = EagleProposer(self.vllm_config, self.device,
+                                                 self)  # type: ignore
+                    if self.speculative_config.method == "eagle3":
+                        self.use_aux_hidden_state_outputs = True
+                elif self.speculative_config.method == "medusa":
+                    self.drafter = MedusaProposer(
+                        vllm_config=self.vllm_config,
+                        device=self.device)  # type: ignore
+                else:
+                    raise ValueError("Unknown speculative decoding method: "
+                                     f"{self.speculative_config.method}")
+                self.rejection_sampler = RejectionSampler()
+
+        # Request states.
+        self.requests: dict[str, CachedRequestState] = {}
+
+        self.input_batch = InputBatch(
+            max_num_reqs=self.max_num_reqs,
+            max_model_len=self.max_model_len,
+            max_num_batched_tokens=self.max_num_tokens,
+            device=self.device,
+            pin_memory=self.pin_memory,
+            vocab_size=self.model_config.get_vocab_size(),
+            block_size=self.cache_config.block_size,
+        )
+
+        self.use_cuda_graph = (self.vllm_config.compilation_config.level
+                               == CompilationLevel.PIECEWISE
+                               and not self.model_config.enforce_eager)
+        # TODO(woosuk): Provide an option to tune the max cudagraph batch size.
+        # The convention is different.
+        # self.cudagraph_batch_sizes sorts in ascending order.
+        # The batch sizes in the config are in descending order.
+        self.cudagraph_batch_sizes = list(
+            reversed(
+                self.vllm_config.compilation_config.cudagraph_capture_sizes))
+
+        # Cache the device properties.
+        self.device_properties = torch.cuda.get_device_properties(self.device)
+        self.num_sms = self.device_properties.multi_processor_count
+
+        # Persistent buffers for CUDA graphs.
+        self.input_ids = torch.zeros(self.max_num_tokens,
+                                     dtype=torch.int32,
+                                     device=self.device)
+        self.positions = torch.zeros(self.max_num_tokens,
+                                     dtype=torch.int64,
+                                     device=self.device)
+        self.query_start_loc = torch.zeros(self.max_num_reqs + 1,
+                                           dtype=torch.int32,
+                                           device=self.device)
+        self.seq_lens = torch.zeros(self.max_num_reqs,
+                                    dtype=torch.int32,
+                                    device=self.device)
+        self.slot_mapping = torch.zeros(self.max_num_tokens,
+                                        dtype=torch.int64,
+                                        device=self.device)
+
+        # None in the first PP rank. The rest are set after load_model.
+        self.intermediate_tensors: Optional[IntermediateTensors] = None
+
+        # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
+        if self.uses_mrope:
+            # NOTE: `mrope_positions` is implemented with one additional dummy
+            # position on purpose to make it non-contiguous so that it can work
+            # with torch compile.
+            # See detailed explanation in https://github.com/vllm-project/vllm/pull/12128#discussion_r1926431923
+
+            # NOTE: When M-RoPE is enabled, position ids are 3D regardless of
+            # the modality of inputs. For text-only inputs, each dimension has
+            # identical position IDs, making M-RoPE functionally equivalent to
+            # 1D-RoPE.
+            # See page 5 of https://arxiv.org/abs/2409.12191
+            self.mrope_positions = torch.zeros((3, self.max_num_tokens + 1),
+                                               dtype=torch.int64,
+                                               device=self.device)
+            self.mrope_positions_cpu = torch.zeros(
+                (3, self.max_num_tokens + 1),
+                dtype=torch.int64,
+                device="cpu",
+                pin_memory=self.pin_memory)
+
+        # Only relevant for models using ALiBi (e.g, MPT)
+        self.use_alibi = check_use_alibi(model_config)
+
+        self.inputs_embeds = torch.zeros(
+            (self.max_num_tokens, self.hidden_size),
+            dtype=self.dtype,
+            device=self.device)
+
+        # OPTIMIZATION: Cache the tensors rather than creating them every step.
+        # Keep in int64 to avoid overflow with long context
+        self.arange_np = np.arange(max(self.max_num_reqs + 1,
+                                       self.max_model_len,
+                                       self.max_num_tokens),
+                                   dtype=np.int64)
+        # NOTE(woosuk): These tensors are "stateless", i.e., they are literally
+        # a faster version of creating a new tensor every time. Thus, we should
+        # not make any assumptions about the values in these tensors.
+        self.input_ids_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int32,
+                                         device="cpu",
+                                         pin_memory=self.pin_memory)
+        self.positions_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int64,
+                                         device="cpu",
+                                         pin_memory=self.pin_memory)
+        self.positions_np = self.positions_cpu.numpy()
+        self.query_start_loc_cpu = torch.zeros(self.max_num_reqs + 1,
+                                               dtype=torch.int32,
+                                               device="cpu",
+                                               pin_memory=self.pin_memory)
+        self.query_start_loc_np = self.query_start_loc_cpu.numpy()
+        self.seq_lens_cpu = torch.zeros(self.max_num_reqs,
+                                        dtype=torch.int32,
+                                        device="cpu",
+                                        pin_memory=self.pin_memory)
+        self.seq_lens_np = self.seq_lens_cpu.numpy()
+
+    def _may_reorder_batch(self, scheduler_output: "SchedulerOutput") -> bool:
+        """
+        Update the order of requests in the batch based on the attention
+        backend's needs. For example, some attention backends (namely MLA) may
+        want to separate requests based on if the attention computation will be
+        compute-bound or memory-bound.
+
+        Args:
+            scheduler_output: The scheduler output.
+
+        Returns:
+            True if the batch was reordered, False otherwise.
+        """
+        batch_reordered = self.attn_metadata_builders[0].reorder_batch(
+            self.input_batch, scheduler_output)
+
+        # For models with multiple KV cache groups, the groups should agree on
+        # the same order of requests. We ensure this by only allowing the first
+        # group to reorder the batch and asserting that all other groups do not
+        # reorder the batch.
+        for i in range(1, len(self.kv_cache_config.kv_cache_groups)):
+            assert not self.attn_metadata_builders[i].reorder_batch(
+                self.input_batch, scheduler_output)
+        return batch_reordered
+
+    def _update_states(self, scheduler_output: "SchedulerOutput") -> None:
+        """Update the cached states and the persistent batch with the scheduler
+        output.
+
+        The updated states are used by the `_prepare_inputs` function to create
+        the input GPU tensors for the model.
+
+        The SamplingMetadata is updated and copied to the GPU if there is a
+        new/resumed/paused/finished request in the batch.
+        """
+        # Remove finished requests from the cached states.
+        for req_id in scheduler_output.finished_req_ids:
+            self.requests.pop(req_id, None)
+            self.encoder_cache.pop(req_id, None)
+        # Remove the finished requests from the persistent batch.
+        # NOTE(woosuk): There could be an edge case where finished_req_ids and
+        # scheduled_req_ids overlap. This happens when a request is aborted and
+        # then resubmitted with the same ID. In this case, we treat them as two
+        # distinct requests - clearing the cached states for the first request
+        # and handling the second as a new request.
+        removed_req_indices: list[int] = []
+        for req_id in scheduler_output.finished_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            if req_index is not None:
+                removed_req_indices.append(req_index)
+
+        # Free the cached encoder outputs.
+        for req_id, input_id in scheduler_output.free_encoder_input_ids:
+            encoder_outputs = self.encoder_cache.get(req_id)
+            if encoder_outputs is not None:
+                encoder_outputs.pop(input_id, None)
+                if not encoder_outputs:
+                    self.encoder_cache.pop(req_id, None)
+
+        # Remove the unscheduled requests from the persistent batch.
+        # NOTE(woosuk): The unscheduled requests are either preempted requests
+        # or running requests that are not scheduled in this step. We remove
+        # them from the persistent batch but keep their cached states since
+        # they will be scheduled again sometime in the future.
+        scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
+        cached_req_ids = self.input_batch.req_id_to_index.keys()
+        unscheduled_req_ids = cached_req_ids - scheduled_req_ids
+        # NOTE(woosuk): The persistent batch optimization assumes that
+        # consecutive batches contain mostly the same requests. If batches
+        # have low request overlap (e.g., alternating between two distinct
+        # sets of requests), this optimization becomes very inefficient.
+        for req_id in unscheduled_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            assert req_index is not None
+            removed_req_indices.append(req_index)
+
+        req_ids_to_add: list[str] = []
+        # Add new requests to the cached states.
+        for new_req_data in scheduler_output.scheduled_new_reqs:
+            req_id = new_req_data.req_id
+            sampling_params = new_req_data.sampling_params
+            if sampling_params.sampling_type == SamplingType.RANDOM_SEED:
+                generator = torch.Generator(device=self.device)
+                generator.manual_seed(sampling_params.seed)
+            else:
+                generator = None
+
+            self.requests[req_id] = CachedRequestState(
+                req_id=req_id,
+                prompt_token_ids=new_req_data.prompt_token_ids,
+                mm_inputs=new_req_data.mm_inputs,
+                mm_positions=new_req_data.mm_positions,
+                sampling_params=sampling_params,
+                generator=generator,
+                block_ids=new_req_data.block_ids,
+                num_computed_tokens=new_req_data.num_computed_tokens,
+                output_token_ids=[],
+                lora_request=new_req_data.lora_request,
+            )
+
+            # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
+            if self.uses_mrope:
+                image_grid_thw = []
+                video_grid_thw = []
+                second_per_grid_ts = []
+                audio_feature_lengths = []
+                use_audio_in_video = False
+                for mm_input in self.requests[req_id].mm_inputs:
+                    if mm_input.get("image_grid_thw") is not None:
+                        image_grid_thw.extend(
+                            mm_input["image_grid_thw"].tolist())
+                    if mm_input.get("video_grid_thw") is not None:
+                        video_grid_thw.extend(
+                            mm_input["video_grid_thw"].tolist())
+                    if mm_input.get("second_per_grid_ts") is not None:
+                        second_per_grid_ts.extend(
+                            mm_input["second_per_grid_ts"])
+                    if mm_input.get("audio_feature_lengths") is not None:
+                        audio_feature_lengths.extend(
+                            mm_input["audio_feature_lengths"])
+                    if mm_input.get("use_audio_in_video") is True:
+                        use_audio_in_video = True
+
+                hf_config = self.model_config.hf_config
+
+                self.requests[req_id].mrope_positions, \
+                    self.requests[req_id].mrope_position_delta = \
+                    MRotaryEmbedding.get_input_positions_tensor(
+                        self.requests[req_id].prompt_token_ids,
+                        hf_config=hf_config,
+                        image_grid_thw=image_grid_thw,
+                        video_grid_thw=video_grid_thw,
+                        second_per_grid_ts=second_per_grid_ts,
+                        audio_feature_lengths=audio_feature_lengths,
+                        use_audio_in_video=use_audio_in_video,
+                    )
+
+            req_ids_to_add.append(req_id)
+
+        # Update the states of the running/resumed requests.
+        for req_data in scheduler_output.scheduled_cached_reqs:
+            req_id = req_data.req_id
+            req_state = self.requests[req_id]
+
+            # Update the cached states.
+            num_computed_tokens = req_data.num_computed_tokens
+            req_state.num_computed_tokens = num_computed_tokens
+            # Add the sampled token(s) from the previous step (if any).
+            # This doesn't include "unverified" tokens like spec decode tokens.
+            num_new_tokens = (num_computed_tokens +
+                              len(req_data.new_token_ids) -
+                              req_state.num_tokens)
+            if num_new_tokens == 1:
+                # Avoid slicing list in most common case.
+                req_state.output_token_ids.append(req_data.new_token_ids[-1])
+            elif num_new_tokens > 0:
+                req_state.output_token_ids.extend(
+                    req_data.new_token_ids[-num_new_tokens:])
+            # Update the block IDs.
+            if not req_data.resumed_from_preemption:
+                # Append the new blocks to the existing block IDs.
+                for i in range(len(self.kv_cache_config.kv_cache_groups)):
+                    req_state.block_ids[i].extend(req_data.new_block_ids[i])
+            else:
+                # The request is resumed from preemption.
+                # Replace the existing block IDs with the new ones.
+                req_state.block_ids = req_data.new_block_ids
+
+            req_index = self.input_batch.req_id_to_index.get(req_id)
+            if req_index is None:
+                # The request is not in the persistent batch.
+                # The request was either preempted and resumed later, or was not
+                # scheduled in the previous step and needs to be added again.
+                req_ids_to_add.append(req_id)
+                continue
+
+            # Update the persistent batch.
+            self.input_batch.num_computed_tokens_cpu[req_index] = (
+                num_computed_tokens)
+            self.input_batch.block_table.append_row(req_data.new_block_ids,
+                                                    req_index)
+            # Add new_token_ids to token_ids_cpu.
+            start_token_index = num_computed_tokens
+            end_token_index = num_computed_tokens + len(req_data.new_token_ids)
+            self.input_batch.token_ids_cpu[
+                req_index,
+                start_token_index:end_token_index] = req_data.new_token_ids
+            self.input_batch.num_tokens_no_spec[req_index] = end_token_index
+            # Add spec_token_ids to token_ids_cpu.
+            spec_token_ids = scheduler_output.scheduled_spec_decode_tokens.get(
+                req_id, ())
+            if spec_token_ids:
+                start_index = end_token_index
+                end_token_index += len(spec_token_ids)
+                self.input_batch.token_ids_cpu[
+                    req_index, start_index:end_token_index] = spec_token_ids
+            # NOTE(woosuk): `num_tokens` here may include spec decode tokens.
+            self.input_batch.num_tokens[req_index] = end_token_index
+
+        # Check if the batch has changed. If not, we can skip copying the
+        # sampling metadata from CPU to GPU.
+        batch_changed = len(removed_req_indices) > 0 or len(req_ids_to_add) > 0
+
+        # Add the new or resumed requests to the persistent batch.
+        # The smaller empty indices are filled first.
+        removed_req_indices.sort(reverse=True)
+        for req_id in req_ids_to_add:
+            req_state = self.requests[req_id]
+            if removed_req_indices:
+                # Fill the empty index.
+                req_index = removed_req_indices.pop()
+            else:
+                # Append to the end.
+                req_index = None
+            self.input_batch.add_request(req_state, req_index)
+
+        # Condense the batched states if there are empty indices.
+        if removed_req_indices:
+            self.input_batch.condense(removed_req_indices)
+
+        batch_reordered = self._may_reorder_batch(scheduler_output)
+
+        if batch_changed or batch_reordered:
+            self.input_batch.refresh_sampling_metadata()
+
+    def _prepare_inputs(
+        self,
+        scheduler_output: "SchedulerOutput",
+    ) -> tuple[dict[str, FlashAttentionMetadata], torch.Tensor,
+               Optional[SpecDecodeMetadata]]:
+        total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
+        assert total_num_scheduled_tokens > 0
+        num_reqs = self.input_batch.num_reqs
+        assert num_reqs > 0
+
+        # OPTIMIZATION: Start copying the block table first.
+        # This way, we can overlap the copy with the following CPU operations.
+        self.input_batch.block_table.commit(num_reqs)
+
+        # Get the number of scheduled tokens for each request.
+        req_ids = self.input_batch.req_ids
+        tokens = [scheduler_output.num_scheduled_tokens[i] for i in req_ids]
+        num_scheduled_tokens = np.array(tokens, dtype=np.int32)
+        max_num_scheduled_tokens = max(tokens)
+
+        # Get request indices.
+        # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
+        req_indices = np.repeat(self.arange_np[:num_reqs],
+                                num_scheduled_tokens)
+
+        # Get batched arange.
+        # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        # Equivalent to but faster than:
+        # np.concatenate([np.arange(n) for n in num_scheduled_tokens])
+        # Step 1. [2, 5, 3] -> [2, 7, 10]
+        cu_num_tokens = np.cumsum(num_scheduled_tokens)
+        # Step 2. [2, 7, 10] -> [0, 0, 2, 2, 2, 2, 2, 7, 7, 7]
+        cumsums_offsets = np.repeat(cu_num_tokens - num_scheduled_tokens,
+                                    num_scheduled_tokens)
+        # Step 3. [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        arange = self.arange_np[:total_num_scheduled_tokens] - cumsums_offsets
+
+        # Get positions.
+        positions_np = self.positions_np[:total_num_scheduled_tokens]
+        np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
+               arange,
+               out=positions_np)
+
+        # Calculate M-RoPE positions.
+        # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
+        if self.uses_mrope:
+            self._calc_mrope_positions(scheduler_output)
+
+        # Get token indices.
+        # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
+        # where M is the max_model_len.
+        token_indices = (positions_np +
+                         req_indices * self.input_batch.token_ids_cpu.shape[1])
+
+        # NOTE(woosuk): We use torch.index_select instead of np.take here
+        # because torch.index_select is much faster than np.take for large
+        # tensors.
+        torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
|
|
565
|
+
0,
|
|
566
|
+
torch.from_numpy(token_indices),
|
|
567
|
+
out=self.input_ids_cpu[:total_num_scheduled_tokens])
|
|
568
|
+
|
|
569
|
+
# Calculate the slot mapping for each KV cache group.
|
|
570
|
+
for kv_cache_group_id, kv_cache_group_spec in enumerate(
|
|
571
|
+
self.kv_cache_config.kv_cache_groups):
|
|
572
|
+
block_size = kv_cache_group_spec.kv_cache_spec.block_size
|
|
573
|
+
block_table: BlockTable = self.input_batch.block_table[
|
|
574
|
+
kv_cache_group_id]
|
|
575
|
+
# E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
|
|
576
|
+
# -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
|
|
577
|
+
# where K is the max_num_blocks_per_req and the block size is 2.
|
|
578
|
+
# NOTE(woosuk): We can't simply use `token_indices // block_size`
|
|
579
|
+
# here because M (max_model_len) is not necessarily divisible by
|
|
580
|
+
# block_size.
|
|
581
|
+
block_table_indices = (
|
|
582
|
+
req_indices * block_table.max_num_blocks_per_req +
|
|
583
|
+
positions_np // block_size)
|
|
584
|
+
block_table_cpu = block_table.get_cpu_tensor()
|
|
585
|
+
block_numbers = block_table_cpu.flatten(
|
|
586
|
+
)[block_table_indices].numpy()
|
|
587
|
+
block_offsets = positions_np % block_size
|
|
588
|
+
np.add(
|
|
589
|
+
block_numbers * block_size,
|
|
590
|
+
block_offsets,
|
|
591
|
+
out=block_table.slot_mapping_np[:total_num_scheduled_tokens])
|
|
592
|
+
|
|
593
|
+
# Prepare the attention metadata.
|
|
594
|
+
self.query_start_loc_np[0] = 0
|
|
595
|
+
self.query_start_loc_np[1:num_reqs + 1] = cu_num_tokens
|
|
596
|
+
|
|
597
|
+
self.seq_lens_np[:num_reqs] = (
|
|
598
|
+
self.input_batch.num_computed_tokens_cpu[:num_reqs] +
|
|
599
|
+
num_scheduled_tokens)
|
|
600
|
+
|
|
601
|
+
# Copy the tensors to the GPU.
|
|
602
|
+
self.input_ids[:total_num_scheduled_tokens].copy_(
|
|
603
|
+
self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True)
|
|
604
|
+
if self.uses_mrope:
|
|
605
|
+
# Only relevant for models using M-RoPE (e.g, Qwen2-VL)
|
|
606
|
+
self.mrope_positions[:, :total_num_scheduled_tokens].copy_(
|
|
607
|
+
self.mrope_positions_cpu[:, :total_num_scheduled_tokens],
|
|
608
|
+
non_blocking=True)
|
|
609
|
+
else:
|
|
610
|
+
# Common case (1D positions)
|
|
611
|
+
self.positions[:total_num_scheduled_tokens].copy_(
|
|
612
|
+
self.positions_cpu[:total_num_scheduled_tokens],
|
|
613
|
+
non_blocking=True)
|
|
614
|
+
|
|
615
|
+
self.query_start_loc[:num_reqs + 1].copy_(
|
|
616
|
+
self.query_start_loc_cpu[:num_reqs + 1], non_blocking=True)
|
|
617
|
+
self.seq_lens[:num_reqs].copy_(self.seq_lens_cpu[:num_reqs],
|
|
618
|
+
non_blocking=True)
|
|
619
|
+
|
|
620
|
+
# Fill unused with -1. Needed for reshape_and_cache
|
|
621
|
+
self.seq_lens[num_reqs:].fill_(0)
|
|
622
|
+
self.query_start_loc[num_reqs + 1:].fill_(-1)
|
|
623
|
+
|
|
624
|
+
query_start_loc = self.query_start_loc[:num_reqs + 1]
|
|
625
|
+
seq_lens = self.seq_lens[:num_reqs]
|
|
626
|
+
|
|
627
|
+
common_attn_metadata = CommonAttentionMetadata(
|
|
628
|
+
query_start_loc=query_start_loc, seq_lens=seq_lens)
|
|
629
|
+
|
|
630
|
+
attn_metadata: dict[str, FlashAttentionMetadata] = {}
|
|
631
|
+
# Prepare the attention metadata for each KV cache group and make layers
|
|
632
|
+
# in the same group share the same metadata.
|
|
633
|
+
for kv_cache_group_id, kv_cache_group_spec in enumerate(
|
|
634
|
+
self.kv_cache_config.kv_cache_groups):
|
|
635
|
+
|
|
636
|
+
# Prepare for cascade attention if enabled & beneficial.
|
|
637
|
+
common_prefix_len = 0
|
|
638
|
+
if self.cascade_attn_enabled:
|
|
639
|
+
common_prefix_len = self._compute_cascade_attn_prefix_len(
|
|
640
|
+
num_scheduled_tokens,
|
|
641
|
+
scheduler_output.
|
|
642
|
+
num_common_prefix_blocks[kv_cache_group_id],
|
|
643
|
+
kv_cache_group_spec.kv_cache_spec,
|
|
644
|
+
self.attn_metadata_builders[kv_cache_group_id],
|
|
645
|
+
)
|
|
646
|
+
|
|
647
|
+
attn_metadata_i = (
|
|
648
|
+
self.attn_metadata_builders[kv_cache_group_id].build(
|
|
649
|
+
num_reqs=num_reqs,
|
|
650
|
+
num_actual_tokens=total_num_scheduled_tokens,
|
|
651
|
+
max_query_len=max_num_scheduled_tokens,
|
|
652
|
+
common_prefix_len=common_prefix_len,
|
|
653
|
+
common_attn_metadata=common_attn_metadata))
|
|
654
|
+
for layer_name in kv_cache_group_spec.layer_names:
|
|
655
|
+
attn_metadata[layer_name] = attn_metadata_i
|
|
656
|
+
|
|
657
|
+
use_spec_decode = len(
|
|
658
|
+
scheduler_output.scheduled_spec_decode_tokens) > 0
|
|
659
|
+
if not use_spec_decode:
|
|
660
|
+
# NOTE(woosuk): Due to chunked prefills, the batch may contain
|
|
661
|
+
# partial requests. While we should not sample any token
|
|
662
|
+
# from these partial requests, we do so for simplicity.
|
|
663
|
+
# We will ignore the sampled tokens from the partial requests.
|
|
664
|
+
# TODO: Support prompt logprobs.
|
|
665
|
+
logits_indices = query_start_loc[1:] - 1
|
|
666
|
+
spec_decode_metadata = None
|
|
667
|
+
else:
|
|
668
|
+
# Get the number of draft tokens for each request.
|
|
669
|
+
# Iterate over the dictionary rather than all requests since not all
|
|
670
|
+
# requests have draft tokens.
|
|
671
|
+
num_draft_tokens = np.zeros(num_reqs, dtype=np.int32)
|
|
672
|
+
for req_id, draft_token_ids in (
|
|
673
|
+
scheduler_output.scheduled_spec_decode_tokens.items()):
|
|
674
|
+
req_idx = self.input_batch.req_id_to_index[req_id]
|
|
675
|
+
num_draft_tokens[req_idx] = len(draft_token_ids)
|
|
676
|
+
|
|
677
|
+
spec_decode_metadata = self._calc_spec_decode_metadata(
|
|
678
|
+
num_draft_tokens, cu_num_tokens)
|
|
679
|
+
logits_indices = spec_decode_metadata.logits_indices
|
|
680
|
+
|
|
681
|
+
# Hot-Swap lora model
|
|
682
|
+
if self.lora_config:
|
|
683
|
+
self.set_active_loras(self.input_batch, num_scheduled_tokens)
|
|
684
|
+
|
|
685
|
+
return attn_metadata, logits_indices, spec_decode_metadata
|
|
686
|
+
|
|
687
|
+
def _compute_cascade_attn_prefix_len(
|
|
688
|
+
self,
|
|
689
|
+
num_scheduled_tokens: np.ndarray,
|
|
690
|
+
num_common_prefix_blocks: int,
|
|
691
|
+
kv_cache_spec: KVCacheSpec,
|
|
692
|
+
attn_metadata_builder: AttentionMetadataBuilder,
|
|
693
|
+
) -> int:
|
|
694
|
+
"""Compute the length of the common prefix for cascade attention.
|
|
695
|
+
|
|
696
|
+
NOTE(woosuk): The common prefix length returned by this function
|
|
697
|
+
represents the length used specifically for cascade attention, not the
|
|
698
|
+
actual number of tokens shared between requests. When cascade attention
|
|
699
|
+
is disabled (use_cascade=False), this function returns 0 even if
|
|
700
|
+
requests share common tokens. Additionally, the common prefix length is
|
|
701
|
+
truncated to a multiple of the block size and may be further truncated
|
|
702
|
+
due to implementation details explained below.
|
|
703
|
+
|
|
704
|
+
Args:
|
|
705
|
+
num_scheduled_tokens: Number of tokens scheduled per request.
|
|
706
|
+
num_common_prefix_blocks: Number of shared KV cache blocks.
|
|
707
|
+
|
|
708
|
+
Returns:
|
|
709
|
+
int: Length of common prefix in tokens.
|
|
710
|
+
"""
|
|
711
|
+
common_prefix_len = num_common_prefix_blocks * kv_cache_spec.block_size
|
|
712
|
+
if common_prefix_len == 0:
|
|
713
|
+
# Common case.
|
|
714
|
+
return 0
|
|
715
|
+
|
|
716
|
+
# NOTE(woosuk): Cascade attention uses two attention kernels: one
|
|
717
|
+
# for the common prefix and the other for the rest. For the first
|
|
718
|
+
# kernel, we concatenate all the query tokens (possibly from
|
|
719
|
+
# different requests) and treat them as if they are from the same
|
|
720
|
+
# request. Then, we use bi-directional attention to process the
|
|
721
|
+
# common prefix in the KV cache. Importantly, this means that the
|
|
722
|
+
# first kernel does not do any masking.
|
|
723
|
+
|
|
724
|
+
# Consider the following example:
|
|
725
|
+
# Request 1's input query: [D, E, X]
|
|
726
|
+
# Request 1's kv cache: [A, B, C, D, E, X]
|
|
727
|
+
# Request 1's num_computed_tokens: 3 (i.e., [A, B, C])
|
|
728
|
+
# Request 2's input query: [E, Y]
|
|
729
|
+
# Request 2's kv cache: [A, B, C, D, E, Y]
|
|
730
|
+
# Request 2's num_computed_tokens: 4 (i.e., [A, B, C, D])
|
|
731
|
+
|
|
732
|
+
# If we use [A, B, C, D, E] as the common prefix, then the
|
|
733
|
+
# first kernel will compute the bi-directional attention between
|
|
734
|
+
# input query [D, E, X, E, Y] and common prefix [A, B, C, D, E].
|
|
735
|
+
# However, this is wrong because D in Request 1 should not attend to
|
|
736
|
+
# E in the common prefix (i.e., we need masking).
|
|
737
|
+
# To avoid this, [A, B, C, D] should be the common prefix.
|
|
738
|
+
# That is, the common prefix should be capped by the minimum
|
|
739
|
+
# num_computed_tokens among the requests, and plus one to include
|
|
740
|
+
# the first token of the query.
|
|
741
|
+
|
|
742
|
+
# In practice, we use [A, B, C] as the common prefix, instead of
|
|
743
|
+
# [A, B, C, D] (i.e., the common prefix is capped by the minimum
|
|
744
|
+
# num_computed_tokens, without plus one).
|
|
745
|
+
# This is because of an implementation detail: We want to always
|
|
746
|
+
# use two kernels for cascade attention. Let's imagine:
|
|
747
|
+
# Request 3's input query: [D]
|
|
748
|
+
# Request 3's kv cache: [A, B, C, D]
|
|
749
|
+
# Request 3's num_computed_tokens: 3 (i.e., [A, B, C])
|
|
750
|
+
# If we use [A, B, C, D] as the common prefix for Request 1-3,
|
|
751
|
+
# then Request 3 will be processed only by the first kernel,
|
|
752
|
+
# and the second kernel will get an empty input. While this is not
|
|
753
|
+
# a fundamental problem, our current implementation does not support
|
|
754
|
+
# this case.
|
|
755
|
+
num_reqs = len(num_scheduled_tokens)
|
|
756
|
+
common_prefix_len = min(
|
|
757
|
+
common_prefix_len,
|
|
758
|
+
self.input_batch.num_computed_tokens_cpu[:num_reqs].min())
|
|
759
|
+
# common_prefix_len should be a multiple of the block size.
|
|
760
|
+
common_prefix_len = (common_prefix_len // kv_cache_spec.block_size *
|
|
761
|
+
kv_cache_spec.block_size)
|
|
762
|
+
use_sliding_window = (isinstance(kv_cache_spec, SlidingWindowSpec) or
|
|
763
|
+
(isinstance(kv_cache_spec, FullAttentionSpec)
|
|
764
|
+
and kv_cache_spec.sliding_window is not None))
|
|
765
|
+
assert isinstance(kv_cache_spec, AttentionSpec)
|
|
766
|
+
use_cascade = attn_metadata_builder.use_cascade_attention(
|
|
767
|
+
common_prefix_len=common_prefix_len,
|
|
768
|
+
query_lens=num_scheduled_tokens,
|
|
769
|
+
num_query_heads=self.num_query_heads,
|
|
770
|
+
num_kv_heads=kv_cache_spec.num_kv_heads,
|
|
771
|
+
use_alibi=self.use_alibi,
|
|
772
|
+
use_sliding_window=use_sliding_window,
|
|
773
|
+
num_sms=self.num_sms,
|
|
774
|
+
)
|
|
775
|
+
return common_prefix_len if use_cascade else 0
|
|
776
|
+
|
|
777
|
+
def _calc_mrope_positions(self, scheduler_output: "SchedulerOutput"):
|
|
778
|
+
mrope_pos_ptr = 0
|
|
779
|
+
for index, req_id in enumerate(self.input_batch.req_ids):
|
|
780
|
+
req = self.requests[req_id]
|
|
781
|
+
assert req.mrope_positions is not None
|
|
782
|
+
|
|
783
|
+
num_computed_tokens = \
|
|
784
|
+
self.input_batch.num_computed_tokens_cpu[index]
|
|
785
|
+
num_scheduled_tokens = \
|
|
786
|
+
scheduler_output.num_scheduled_tokens[req_id]
|
|
787
|
+
num_prompt_tokens = len(req.prompt_token_ids)
|
|
788
|
+
|
|
789
|
+
if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens:
|
|
790
|
+
prompt_part_len = max(0,
|
|
791
|
+
num_prompt_tokens - num_computed_tokens)
|
|
792
|
+
completion_part_len = max(
|
|
793
|
+
0, num_scheduled_tokens - prompt_part_len)
|
|
794
|
+
else:
|
|
795
|
+
prompt_part_len = num_scheduled_tokens
|
|
796
|
+
completion_part_len = 0
|
|
797
|
+
|
|
798
|
+
assert num_scheduled_tokens == prompt_part_len + completion_part_len
|
|
799
|
+
|
|
800
|
+
if prompt_part_len > 0:
|
|
801
|
+
# prompt's mrope_positions are pre-computed
|
|
802
|
+
dst_start = mrope_pos_ptr
|
|
803
|
+
dst_end = mrope_pos_ptr + prompt_part_len
|
|
804
|
+
src_start = num_computed_tokens
|
|
805
|
+
src_end = num_computed_tokens + prompt_part_len
|
|
806
|
+
|
|
807
|
+
self.mrope_positions_cpu[:, dst_start:dst_end] = \
|
|
808
|
+
req.mrope_positions[:,src_start:src_end]
|
|
809
|
+
|
|
810
|
+
mrope_pos_ptr += prompt_part_len
|
|
811
|
+
|
|
812
|
+
if completion_part_len > 0:
|
|
813
|
+
# compute completion's mrope_positions on-the-fly
|
|
814
|
+
dst_start = mrope_pos_ptr
|
|
815
|
+
dst_end = mrope_pos_ptr + completion_part_len
|
|
816
|
+
|
|
817
|
+
self.mrope_positions_cpu[:, dst_start:dst_end] = \
|
|
818
|
+
MRotaryEmbedding.get_next_input_positions_tensor(
|
|
819
|
+
req.mrope_position_delta,
|
|
820
|
+
context_len=num_computed_tokens +
|
|
821
|
+
prompt_part_len,
|
|
822
|
+
seq_len=num_computed_tokens +
|
|
823
|
+
prompt_part_len +
|
|
824
|
+
completion_part_len,
|
|
825
|
+
)
|
|
826
|
+
|
|
827
|
+
mrope_pos_ptr += completion_part_len
|
|
828
|
+
|
|
829
|
+
def _calc_spec_decode_metadata(
|
|
830
|
+
self,
|
|
831
|
+
num_draft_tokens: np.ndarray,
|
|
832
|
+
cu_num_scheduled_tokens: np.ndarray,
|
|
833
|
+
) -> SpecDecodeMetadata:
|
|
834
|
+
# Inputs:
|
|
835
|
+
# cu_num_scheduled_tokens: [ 4, 104, 107, 207, 209]
|
|
836
|
+
# num_draft_tokens: [ 3, 0, 2, 0, 1]
|
|
837
|
+
# Outputs:
|
|
838
|
+
# cu_num_draft_tokens: [ 3, 3, 5, 5, 6]
|
|
839
|
+
# logits_indices: [ 0, 1, 2, 3, 103, 104, 105, 106,
|
|
840
|
+
# 206, 207, 208]
|
|
841
|
+
# target_logits_indices: [ 0, 1, 2, 5, 6, 9]
|
|
842
|
+
# bonus_logits_indices: [ 3, 4, 7, 8, 10]
|
|
843
|
+
|
|
844
|
+
# Compute the logits indices.
|
|
845
|
+
# [4, 1, 3, 1, 2]
|
|
846
|
+
num_sampled_tokens = num_draft_tokens + 1
|
|
847
|
+
# Step 1. [4, 5, 8, 9, 11]
|
|
848
|
+
cu_num_sampled_tokens = np.cumsum(num_sampled_tokens, dtype=np.int32)
|
|
849
|
+
total_num_sampled_tokens = cu_num_sampled_tokens[-1]
|
|
850
|
+
# Step 2. [0, 0, 0, 0, 4, 5, 5, 5, 8, 9, 9]
|
|
851
|
+
cumsums_offsets = np.repeat(cu_num_sampled_tokens - num_sampled_tokens,
|
|
852
|
+
num_sampled_tokens)
|
|
853
|
+
# Step 3. [0, 1, 2, 3, 0, 0, 1, 2, 0, 0, 1]
|
|
854
|
+
arange = self.arange_np[:total_num_sampled_tokens] - cumsums_offsets
|
|
855
|
+
# Step 4. [0, 0, 0, 0, 103, 104, 104, 104, 206, 207, 207]
|
|
856
|
+
logits_indices = np.repeat(
|
|
857
|
+
cu_num_scheduled_tokens - num_sampled_tokens, num_sampled_tokens)
|
|
858
|
+
# Step 5. [0, 1, 2, 3, 103, 104, 105, 106, 206, 207, 208]
|
|
859
|
+
logits_indices += arange
|
|
860
|
+
|
|
861
|
+
# Compute the bonus logits indices.
|
|
862
|
+
bonus_logits_indices = cu_num_sampled_tokens - 1
|
|
863
|
+
|
|
864
|
+
# Compute the draft logits indices.
|
|
865
|
+
# [3, 3, 5, 5, 6]
|
|
866
|
+
cu_num_draft_tokens = np.cumsum(num_draft_tokens, dtype=np.int32)
|
|
867
|
+
total_num_draft_tokens = cu_num_draft_tokens[-1]
|
|
868
|
+
# [0, 0, 0, 3, 3, 5]
|
|
869
|
+
cumsums_offsets = np.repeat(cu_num_draft_tokens - num_draft_tokens,
|
|
870
|
+
num_draft_tokens)
|
|
871
|
+
# [0, 1, 2, 0, 1, 0]
|
|
872
|
+
arange = self.arange_np[:total_num_draft_tokens] - cumsums_offsets
|
|
873
|
+
# [0, 0, 0, 5, 5, 9]
|
|
874
|
+
target_logits_indices = np.repeat(
|
|
875
|
+
cu_num_sampled_tokens - num_sampled_tokens, num_draft_tokens)
|
|
876
|
+
# [0, 1, 2, 5, 6, 9]
|
|
877
|
+
target_logits_indices += arange
|
|
878
|
+
|
|
879
|
+
# TODO: Optimize the CPU -> GPU copy.
|
|
880
|
+
cu_num_draft_tokens = torch.from_numpy(cu_num_draft_tokens).to(
|
|
881
|
+
self.device, non_blocking=True)
|
|
882
|
+
logits_indices = torch.from_numpy(logits_indices).to(self.device,
|
|
883
|
+
non_blocking=True)
|
|
884
|
+
target_logits_indices = torch.from_numpy(target_logits_indices).to(
|
|
885
|
+
self.device, non_blocking=True)
|
|
886
|
+
bonus_logits_indices = torch.from_numpy(bonus_logits_indices).to(
|
|
887
|
+
self.device, non_blocking=True)
|
|
888
|
+
|
|
889
|
+
# Compute the draft token ids.
|
|
890
|
+
# draft_token_indices: [ 1, 2, 3, 105, 106, 208]
|
|
891
|
+
draft_token_ids = self.input_ids[logits_indices]
|
|
892
|
+
draft_token_ids = draft_token_ids[target_logits_indices + 1]
|
|
893
|
+
|
|
894
|
+
metadata = SpecDecodeMetadata(
|
|
895
|
+
draft_token_ids=draft_token_ids,
|
|
896
|
+
num_draft_tokens=num_draft_tokens.tolist(),
|
|
897
|
+
cu_num_draft_tokens=cu_num_draft_tokens,
|
|
898
|
+
target_logits_indices=target_logits_indices,
|
|
899
|
+
bonus_logits_indices=bonus_logits_indices,
|
|
900
|
+
logits_indices=logits_indices,
|
|
901
|
+
)
|
|
902
|
+
return metadata
|
|
903
|
+
|
|
904
|
+
def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
|
|
905
|
+
scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
|
|
906
|
+
if not scheduled_encoder_inputs:
|
|
907
|
+
return
|
|
908
|
+
|
|
909
|
+
# Batch the multi-modal inputs.
|
|
910
|
+
mm_inputs = list[MultiModalKwargs]()
|
|
911
|
+
req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
|
|
912
|
+
for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
|
|
913
|
+
req_state = self.requests[req_id]
|
|
914
|
+
|
|
915
|
+
for mm_input_id in encoder_input_ids:
|
|
916
|
+
mm_inputs.append(req_state.mm_inputs[mm_input_id])
|
|
917
|
+
req_ids_pos.append(
|
|
918
|
+
(req_id, mm_input_id, req_state.mm_positions[mm_input_id]))
|
|
919
|
+
|
|
920
|
+
# Batch mm inputs as much as we can: if a request in the batch has
|
|
921
|
+
# multiple modalities or a different modality than the previous one,
|
|
922
|
+
# we process it separately to preserve item order.
|
|
923
|
+
# FIXME(ywang96): This is a hacky way to deal with multiple modalities
|
|
924
|
+
# in the same batch while still being able to benefit from batching
|
|
925
|
+
# multimodal inputs. The proper solution should be reordering the
|
|
926
|
+
# encoder outputs.
|
|
927
|
+
grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)
|
|
928
|
+
|
|
929
|
+
encoder_outputs = []
|
|
930
|
+
for grouped_mm_inputs in grouped_mm_inputs_list:
|
|
931
|
+
batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
|
|
932
|
+
batched_mm_inputs = MultiModalKwargs.as_kwargs(
|
|
933
|
+
batched_mm_inputs,
|
|
934
|
+
dtype=self.model_config.dtype,
|
|
935
|
+
device=self.device,
|
|
936
|
+
)
|
|
937
|
+
|
|
938
|
+
# Run the encoder.
|
|
939
|
+
# `curr_group_outputs` is either of the following:
|
|
940
|
+
# 1. A tensor of shape (num_items, feature_size, hidden_size)
|
|
941
|
+
# in case feature_size is fixed across all multimodal items.
|
|
942
|
+
# 2. A list or tuple (length: num_items) of tensors, each of shape
|
|
943
|
+
# (feature_size, hidden_size) in case the feature size is dynamic
|
|
944
|
+
# depending on the input multimodal items.
|
|
945
|
+
curr_group_outputs = self.model.get_multimodal_embeddings(
|
|
946
|
+
**batched_mm_inputs)
|
|
947
|
+
|
|
948
|
+
sanity_check_mm_encoder_outputs(
|
|
949
|
+
curr_group_outputs,
|
|
950
|
+
expected_num_items=len(grouped_mm_inputs),
|
|
951
|
+
)
|
|
952
|
+
|
|
953
|
+
for output in curr_group_outputs:
|
|
954
|
+
encoder_outputs.append(output)
|
|
955
|
+
|
|
956
|
+
# Cache the encoder outputs.
|
|
957
|
+
for (req_id, input_id, pos_info), output in zip(
|
|
958
|
+
req_ids_pos,
|
|
959
|
+
encoder_outputs,
|
|
960
|
+
):
|
|
961
|
+
if req_id not in self.encoder_cache:
|
|
962
|
+
self.encoder_cache[req_id] = {}
|
|
963
|
+
|
|
964
|
+
self.encoder_cache[req_id][input_id] = scatter_mm_placeholders(
|
|
965
|
+
output,
|
|
966
|
+
is_embed=pos_info.is_embed,
|
|
967
|
+
)
|
|
968
|
+
|
|
969
|
+
def _gather_mm_embeddings(
|
|
970
|
+
self,
|
|
971
|
+
scheduler_output: "SchedulerOutput",
|
|
972
|
+
) -> list[torch.Tensor]:
|
|
973
|
+
mm_embeds: list[torch.Tensor] = []
|
|
974
|
+
for req_id in self.input_batch.req_ids:
|
|
975
|
+
num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
|
|
976
|
+
req_id]
|
|
977
|
+
req_state = self.requests[req_id]
|
|
978
|
+
num_computed_tokens = req_state.num_computed_tokens
|
|
979
|
+
mm_positions = req_state.mm_positions
|
|
980
|
+
for i, pos_info in enumerate(mm_positions):
|
|
981
|
+
start_pos = pos_info.offset
|
|
982
|
+
num_encoder_tokens = pos_info.length
|
|
983
|
+
|
|
984
|
+
# The encoder output is needed if the two ranges overlap:
|
|
985
|
+
# [num_computed_tokens,
|
|
986
|
+
# num_computed_tokens + num_scheduled_tokens) and
|
|
987
|
+
# [start_pos, start_pos + num_encoder_tokens)
|
|
988
|
+
if start_pos >= num_computed_tokens + num_scheduled_tokens:
|
|
989
|
+
# The encoder output is not needed in this step.
|
|
990
|
+
break
|
|
991
|
+
if start_pos + num_encoder_tokens <= num_computed_tokens:
|
|
992
|
+
# The encoder output is already processed and stored
|
|
993
|
+
# in the decoder's KV cache.
|
|
994
|
+
continue
|
|
995
|
+
|
|
996
|
+
start_idx = max(num_computed_tokens - start_pos, 0)
|
|
997
|
+
end_idx = min(
|
|
998
|
+
num_computed_tokens - start_pos + num_scheduled_tokens,
|
|
999
|
+
num_encoder_tokens)
|
|
1000
|
+
assert start_idx < end_idx
|
|
1001
|
+
assert req_id in self.encoder_cache
|
|
1002
|
+
assert i in self.encoder_cache[req_id]
|
|
1003
|
+
encoder_output = self.encoder_cache[req_id][i]
|
|
1004
|
+
|
|
1005
|
+
if (is_embed := pos_info.is_embed) is not None:
|
|
1006
|
+
is_embed = is_embed[start_idx:end_idx]
|
|
1007
|
+
|
|
1008
|
+
mm_embeds_item = gather_mm_placeholders(
|
|
1009
|
+
encoder_output[start_idx:end_idx],
|
|
1010
|
+
is_embed=is_embed,
|
|
1011
|
+
)
|
|
1012
|
+
mm_embeds.append(mm_embeds_item)
|
|
1013
|
+
return mm_embeds
|
|
1014
|
+
|
|
1015
|
+
def get_model(self) -> nn.Module:
|
|
1016
|
+
return self.model
|
|
1017
|
+
|
|
1018
|
+
def apply_grammar_bitmask(
|
|
1019
|
+
self,
|
|
1020
|
+
scheduler_output: "SchedulerOutput",
|
|
1021
|
+
logits: torch.Tensor,
|
|
1022
|
+
):
|
|
1023
|
+
grammar_bitmask = scheduler_output.grammar_bitmask
|
|
1024
|
+
if grammar_bitmask is None:
|
|
1025
|
+
return
|
|
1026
|
+
|
|
1027
|
+
# We receive the structured output bitmask from the scheduler,
|
|
1028
|
+
# compacted to contain bitmasks only for structured output requests.
|
|
1029
|
+
# The order of the requests in the bitmask is not guaranteed to be the
|
|
1030
|
+
# same as the order of the requests in the gpu runner's batch. We need
|
|
1031
|
+
# to sort the bitmask to match the order of the requests used here.
|
|
1032
|
+
|
|
1033
|
+
# Get the batch indices of the structured output requests.
|
|
1034
|
+
# Keep track of the number of speculative tokens scheduled for every
|
|
1035
|
+
# request in the batch, as the logit indices are offset by this amount.
|
|
1036
|
+
struct_out_req_batch_indices: dict[str, int] = {}
|
|
1037
|
+
cumulative_offset = 0
|
|
1038
|
+
seq = sorted(self.input_batch.req_id_to_index.items(),
|
|
1039
|
+
key=lambda x: x[1])
|
|
1040
|
+
for req_id, batch_index in seq:
|
|
1041
|
+
logit_index = batch_index + cumulative_offset
|
|
1042
|
+
cumulative_offset += len(
|
|
1043
|
+
scheduler_output.scheduled_spec_decode_tokens.get(req_id, []))
|
|
1044
|
+
if req_id in scheduler_output.structured_output_request_ids:
|
|
1045
|
+
struct_out_req_batch_indices[req_id] = logit_index
|
|
1046
|
+
|
|
1047
|
+
out_indices = []
|
|
1048
|
+
|
|
1049
|
+
# Reorder the bitmask to match the order of the requests in the batch.
|
|
1050
|
+
sorted_bitmask = np.zeros_like(grammar_bitmask,
|
|
1051
|
+
shape=(logits.shape[0],
|
|
1052
|
+
grammar_bitmask.shape[1]))
|
|
1053
|
+
cumulative_index = 0
|
|
1054
|
+
seq = sorted(scheduler_output.structured_output_request_ids.items(),
|
|
1055
|
+
key=lambda x: x[1])
|
|
1056
|
+
for req_id, _ in seq:
|
|
1057
|
+
logit_index = struct_out_req_batch_indices[req_id]
|
|
1058
|
+
num_spec_tokens = len(
|
|
1059
|
+
scheduler_output.scheduled_spec_decode_tokens.get(req_id, []))
|
|
1060
|
+
for i in range(1 + num_spec_tokens):
|
|
1061
|
+
sorted_bitmask[logit_index + i] = \
|
|
1062
|
+
grammar_bitmask[cumulative_index + i]
|
|
1063
|
+
out_indices.append(logit_index + i)
|
|
1064
|
+
cumulative_index += 1 + num_spec_tokens
|
|
1065
|
+
grammar_bitmask = sorted_bitmask
|
|
1066
|
+
|
|
1067
|
+
# Serialization of np.ndarray is much more efficient than a tensor,
|
|
1068
|
+
# so we receive it in that format.
|
|
1069
|
+
grammar_bitmask = torch.from_numpy(grammar_bitmask)
|
|
1070
|
+
|
|
1071
|
+
xgr.apply_token_bitmask_inplace(
|
|
1072
|
+
logits,
|
|
1073
|
+
grammar_bitmask.to(self.device, non_blocking=True),
|
|
1074
|
+
indices=out_indices,
|
|
1075
|
+
)
|
|
1076
|
+
|
|
1077
|
+
def sync_and_slice_intermediate_tensors(
|
|
1078
|
+
self, num_tokens: int, intermediate_tensors: IntermediateTensors,
|
|
1079
|
+
sync_self: bool) -> IntermediateTensors:
|
|
1080
|
+
|
|
1081
|
+
assert self.intermediate_tensors is not None
|
|
1082
|
+
|
|
1083
|
+
tp = self.vllm_config.parallel_config.tensor_parallel_size
|
|
1084
|
+
enabled_sp = self.vllm_config.compilation_config.pass_config. \
|
|
1085
|
+
enable_sequence_parallelism
|
|
1086
|
+
if enabled_sp:
|
|
1087
|
+
# When sequence parallelism is enabled, we always pad num_tokens
|
|
1088
|
+
# to be a multiple of tensor_parallel_size (tp) earlier
|
|
1089
|
+
assert num_tokens % tp == 0
|
|
1090
|
+
is_residual_scattered = tp > 1 and enabled_sp \
|
|
1091
|
+
and num_tokens % tp == 0
|
|
1092
|
+
|
|
1093
|
+
# When sequence parallelism is enabled, the "residual" tensor is sharded
|
|
1094
|
+
# across tensor parallel ranks, so each rank only needs its own slice.
|
|
1095
|
+
if sync_self:
|
|
1096
|
+
assert intermediate_tensors is not None
|
|
1097
|
+
for k, v in intermediate_tensors.items():
|
|
1098
|
+
is_scattered = "residual" and is_residual_scattered
|
|
1099
|
+
copy_len = num_tokens // tp if is_scattered else \
|
|
1100
|
+
num_tokens
|
|
1101
|
+
self.intermediate_tensors[k][:copy_len].copy_(
|
|
1102
|
+
v[:copy_len], non_blocking=True)
|
|
1103
|
+
|
|
1104
|
+
return IntermediateTensors({
|
|
1105
|
+
k:
|
|
1106
|
+
v[:num_tokens // tp]
|
|
1107
|
+
if k == "residual" and is_residual_scattered else v[:num_tokens]
|
|
1108
|
+
for k, v in self.intermediate_tensors.items()
|
|
1109
|
+
})
|
|
1110
|
+
|
|
1111
|
+
@torch.inference_mode()
|
|
1112
|
+
def execute_model(
|
|
1113
|
+
self,
|
|
1114
|
+
scheduler_output: "SchedulerOutput",
|
|
1115
|
+
intermediate_tensors: Optional[IntermediateTensors] = None,
|
|
1116
|
+
) -> Union[ModelRunnerOutput, IntermediateTensors]:
|
|
1117
|
+
|
|
1118
|
+
self._update_states(scheduler_output)
|
|
1119
|
+
if not scheduler_output.total_num_scheduled_tokens:
|
|
1120
|
+
if not has_kv_transfer_group():
|
|
1121
|
+
# Return empty ModelRunnerOutput if there's no work to do.
|
|
1122
|
+
return EMPTY_MODEL_RUNNER_OUTPUT
|
|
1123
|
+
|
|
1124
|
+
return self.kv_connector_no_forward(scheduler_output)
|
|
1125
|
+
|
|
1126
|
+
# Prepare the decoder inputs.
|
|
1127
|
+
attn_metadata, logits_indices, spec_decode_metadata = (
|
|
1128
|
+
self._prepare_inputs(scheduler_output))
|
|
1129
|
+
num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
|
|
1130
|
+
if (self.use_cuda_graph
|
|
1131
|
+
and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]):
|
|
1132
|
+
# Use piecewise CUDA graphs.
|
|
1133
|
+
# Add padding to the batch size.
|
|
1134
|
+
num_input_tokens = self.vllm_config.pad_for_cudagraph(
|
|
1135
|
+
num_scheduled_tokens)
|
|
1136
|
+
else:
|
|
1137
|
+
# Eager mode.
|
|
1138
|
+
# Pad tokens to multiple of tensor_parallel_size when
|
|
1139
|
+
# enabled collective fusion for SP
|
|
1140
|
+
tp_size = self.vllm_config.parallel_config.tensor_parallel_size
|
|
1141
|
+
if self.vllm_config.compilation_config.pass_config. \
|
|
1142
|
+
enable_sequence_parallelism and tp_size > 1:
|
|
1143
|
+
from vllm.utils import round_up
|
|
1144
|
+
num_input_tokens = round_up(num_scheduled_tokens, tp_size)
|
|
1145
|
+
else:
|
|
1146
|
+
num_input_tokens = num_scheduled_tokens
|
|
1147
|
+
|
|
1148
|
+
# _prepare_inputs may reorder the batch, so we must gather multi
|
|
1149
|
+
# modal outputs after that to ensure the correct order
|
|
1150
|
+
if self.is_multimodal_model:
|
|
1151
|
+
# Run the multimodal encoder if any.
|
|
1152
|
+
self._execute_mm_encoder(scheduler_output)
|
|
1153
|
+
mm_embeds = self._gather_mm_embeddings(scheduler_output)
|
|
1154
|
+
else:
|
|
1155
|
+
mm_embeds = []
|
|
1156
|
+
|
|
1157
|
+
if self.is_multimodal_model and get_pp_group().is_first_rank:
|
|
1158
|
+
# NOTE(woosuk): To unify token ids and soft tokens (vision
|
|
1159
|
+
# embeddings), we always use embeddings (rather than token ids)
|
|
1160
|
+
# as input to the multimodal model, even when the input is text.
|
|
1161
|
+
input_ids = self.input_ids[:num_scheduled_tokens]
|
|
1162
|
+
if mm_embeds:
|
|
1163
|
+
inputs_embeds = self.model.get_input_embeddings(
|
|
1164
|
+
input_ids, mm_embeds)
|
|
1165
|
+
else:
|
|
1166
|
+
inputs_embeds = self.model.get_input_embeddings(input_ids)
|
|
1167
|
+
# TODO(woosuk): Avoid the copy. Optimize.
|
|
1168
|
+
self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds)
|
|
1169
|
+
inputs_embeds = self.inputs_embeds[:num_input_tokens]
|
|
1170
|
+
input_ids = None
|
|
1171
|
+
else:
|
|
1172
|
+
# For text-only models, we use token ids as input.
|
|
1173
|
+
# While it is possible to use embeddings as input just like the
|
|
1174
|
+
# multimodal models, it is not desirable for performance since
|
|
1175
|
+
# then the embedding layer is not included in the CUDA graph.
|
|
1176
|
+
input_ids = self.input_ids[:num_input_tokens]
|
|
1177
|
+
inputs_embeds = None
|
|
1178
|
+
if self.uses_mrope:
|
|
1179
|
+
positions = self.mrope_positions[:, :num_input_tokens]
|
|
1180
|
+
else:
|
|
1181
|
+
positions = self.positions[:num_input_tokens]
|
|
1182
|
+
|
|
1183
|
+
if get_pp_group().is_first_rank:
|
|
1184
|
+
intermediate_tensors = None
|
|
1185
|
+
else:
|
|
1186
|
+
intermediate_tensors = self.sync_and_slice_intermediate_tensors(
|
|
1187
|
+
num_input_tokens, intermediate_tensors, True)
|
|
1188
|
+
|
|
1189
|
+
# Run the decoder.
|
|
1190
|
+
# Use persistent buffers for CUDA graphs.
|
|
1191
|
+
with set_forward_context(attn_metadata,
|
|
1192
|
+
self.vllm_config,
|
|
1193
|
+
num_tokens=num_input_tokens):
|
|
1194
|
+
self.maybe_setup_kv_connector(scheduler_output)
|
|
1195
|
+
|
|
1196
|
+
model_output = self.model(
|
|
1197
|
+
input_ids=input_ids,
|
|
1198
|
+
positions=positions,
|
|
1199
|
+
intermediate_tensors=intermediate_tensors,
|
|
1200
|
+
inputs_embeds=inputs_embeds,
|
|
1201
|
+
)
|
|
1202
|
+
|
|
1203
|
+
self.maybe_wait_for_kv_save()
|
|
1204
|
+
finished_sending, finished_recving = (
|
|
1205
|
+
self.get_finished_kv_transfers(scheduler_output))
|
|
1206
|
+
|
|
1207
|
+
if self.use_aux_hidden_state_outputs:
|
|
1208
|
+
hidden_states, aux_hidden_states = model_output
|
|
1209
|
+
else:
|
|
1210
|
+
hidden_states = model_output
|
|
1211
|
+
# Broadcast PP output for external_launcher (torchrun)
|
|
1212
|
+
# to make sure we are synced across pp ranks
|
|
1213
|
+
# TODO: Support overlapping mirco-batches
|
|
1214
|
+
# https://github.com/vllm-project/vllm/issues/18019
|
|
1215
|
+
broadcast_pp_output = \
|
|
1216
|
+
self.parallel_config.distributed_executor_backend \
|
|
1217
|
+
== "external_launcher" and len(get_pp_group().ranks) > 0
|
|
1218
|
+
if not get_pp_group().is_last_rank:
|
|
1219
|
+
# For mid-pipeline stages, return the hidden states.
|
|
1220
|
+
if not broadcast_pp_output:
|
|
1221
|
+
return hidden_states
|
|
1222
|
+
assert isinstance(hidden_states, IntermediateTensors)
|
|
1223
|
+
get_pp_group().send_tensor_dict(hidden_states.tensors,
|
|
1224
|
+
all_gather_group=get_tp_group())
|
|
1225
|
+
logits = None
|
|
1226
|
+
else:
|
|
1227
|
+
sample_hidden_states = hidden_states[logits_indices]
|
|
1228
|
+
logits = self.model.compute_logits(sample_hidden_states, None)
|
|
1229
|
+
if broadcast_pp_output:
|
|
1230
|
+
model_output_broadcast_data = {
|
|
1231
|
+
"logits": logits.contiguous(),
|
|
1232
|
+
} if logits is not None else {}
|
|
1233
|
+
model_output_broadcast_data = get_pp_group().broadcast_tensor_dict(
|
|
1234
|
+
model_output_broadcast_data, src=len(get_pp_group().ranks) - 1)
|
|
1235
|
+
assert model_output_broadcast_data is not None
|
|
1236
|
+
logits = model_output_broadcast_data["logits"]
|
|
1237
|
+
|
|
1238
|
+
# Apply structured output bitmasks if present
|
|
1239
|
+
if scheduler_output.grammar_bitmask is not None:
|
|
1240
|
+
self.apply_grammar_bitmask(scheduler_output, logits)
|
|
1241
|
+
|
|
1242
|
+
# Sample the next token and get logprobs if needed.
|
|
1243
|
+
sampling_metadata = self.input_batch.sampling_metadata
|
|
1244
|
+
if spec_decode_metadata is None:
|
|
1245
|
+
sampler_output = self.sampler(
|
|
1246
|
+
logits=logits,
|
|
1247
|
+
sampling_metadata=sampling_metadata,
|
|
1248
|
+
)
|
|
1249
|
+
else:
|
|
1250
|
+
# When indexing with a tensor (bonus_logits_indices), PyTorch
|
|
1251
|
+
# creates a new tensor with separate storage from the original
|
|
1252
|
+
# logits tensor. This means any in-place operations on bonus_logits
|
|
1253
|
+
# won't affect the original logits tensor.
|
|
1254
|
+
assert logits is not None
|
|
1255
|
+
bonus_logits = logits[spec_decode_metadata.bonus_logits_indices]
|
|
1256
|
+
sampler_output = self.sampler(
|
|
1257
|
+
logits=bonus_logits,
|
|
1258
|
+
sampling_metadata=sampling_metadata,
|
|
1259
|
+
)
|
|
1260
|
+
bonus_token_ids = sampler_output.sampled_token_ids
|
|
1261
|
+
|
|
1262
|
+
# Just like `bonus_logits`, `target_logits` is a new tensor with
|
|
1263
|
+
# separate storage from the original `logits` tensor. Therefore,
|
|
1264
|
+
# it is safe to update `target_logits` in place.
|
|
1265
|
+
target_logits = logits[spec_decode_metadata.target_logits_indices]
|
|
1266
|
+
output_token_ids = self.rejection_sampler(
|
|
1267
|
+
spec_decode_metadata,
|
|
1268
|
+
None, # draft_probs
|
|
1269
|
+
target_logits,
|
|
1270
|
+
bonus_token_ids,
|
|
1271
|
+
sampling_metadata,
|
|
1272
|
+
)
|
|
1273
|
+
sampler_output.sampled_token_ids = output_token_ids
|
|
1274
|
+
|
|
1275
|
+
# TODO(woosuk): The following loop can be slow since it iterates over
|
|
1276
|
+
# the requests one by one. Optimize.
|
|
1277
|
+
discard_sampled_tokens_req_indices = []
|
|
1278
|
+
for i, req_id in enumerate(self.input_batch.req_ids):
|
|
1279
|
+
req_state = self.requests[req_id]
|
|
1280
|
+
seq_len = (req_state.num_computed_tokens +
|
|
1281
|
+
scheduler_output.num_scheduled_tokens[req_id])
|
|
1282
|
+
if seq_len < req_state.num_tokens:
|
|
1283
|
+
# Ignore the sampled token for partial prefills.
|
|
1284
|
+
# Rewind the generator state as if the token was not sampled.
|
|
1285
|
+
# This relies on cuda-specific torch-internal impl details
|
|
1286
|
+
generator = self.input_batch.generators.get(i)
|
|
1287
|
+
if generator is not None:
|
|
1288
|
+
generator.set_offset(generator.get_offset() - 4)
|
|
1289
|
+
# Record the index of the request that should not be sampled,
|
|
1290
|
+
# so that we could clear the sampled tokens before returning.
|
|
1291
|
+
discard_sampled_tokens_req_indices.append(i)
|
|
1292
|
+
|
|
1293
|
+
# NOTE: GPU -> CPU Sync happens here.
|
|
1294
|
+
# Move as many CPU operations as possible before this sync point.
|
|
1295
|
+
logprobs_tensors = sampler_output.logprobs_tensors
|
|
1296
|
+
logprobs_lists = logprobs_tensors.tolists() \
|
|
1297
|
+
if logprobs_tensors is not None else None
|
|
1298
|
+
|
|
1299
|
+
# Compute prompt logprobs if needed.
|
|
1300
|
+
prompt_logprobs_dict = self._get_prompt_logprobs_dict(
|
|
1301
|
+
hidden_states[:num_scheduled_tokens],
|
|
1302
|
+
scheduler_output,
|
|
1303
|
+
)
|
|
1304
|
+
|
|
1305
|
+
# Get the valid generated tokens.
|
|
1306
|
+
sampled_token_ids = sampler_output.sampled_token_ids
|
|
1307
|
+
max_gen_len = sampled_token_ids.shape[-1]
|
|
1308
|
+
if max_gen_len == 1:
|
|
1309
|
+
# No spec decode tokens.
|
|
1310
|
+
valid_sampled_token_ids = sampled_token_ids.tolist()
|
|
1311
|
+
else:
|
|
1312
|
+
# Includes spec decode tokens.
|
|
1313
|
+
valid_sampled_token_ids = self.rejection_sampler.parse_output(
|
|
1314
|
+
sampled_token_ids,
|
|
1315
|
+
self.input_batch.vocab_size,
|
|
1316
|
+
)
|
|
1317
|
+
# Mask out the sampled tokens that should not be sampled.
|
|
1318
|
+
for i in discard_sampled_tokens_req_indices:
|
|
1319
|
+
valid_sampled_token_ids[i].clear()
|
|
1320
|
+
|
|
1321
|
+
if not self.use_spec_decode:
|
|
1322
|
+
# Speculative decoding is not enabled.
|
|
1323
|
+
spec_token_ids = None
|
|
1324
|
+
elif self.speculative_config.method == "ngram":
|
|
1325
|
+
assert isinstance(self.drafter, NgramProposer)
|
|
1326
|
+
spec_token_ids = self.generate_draft_token_ids(
|
|
1327
|
+
valid_sampled_token_ids, sampling_metadata)
|
|
1328
|
+
elif self.speculative_config.method == "medusa":
|
|
1329
|
+
assert isinstance(self.drafter, MedusaProposer)
|
|
1330
|
+
if max_gen_len == 1:
|
|
1331
|
+
hidden_states = sample_hidden_states
|
|
1332
|
+
else:
|
|
1333
|
+
indices = []
|
|
1334
|
+
offset = 0
|
|
1335
|
+
for num_draft, tokens in zip(
|
|
1336
|
+
spec_decode_metadata.num_draft_tokens,
|
|
1337
|
+
valid_sampled_token_ids):
|
|
1338
|
+
indices.append(offset + len(tokens) - 1)
|
|
1339
|
+
offset += num_draft + 1
|
|
1340
|
+
|
|
1341
|
+
indices = torch.tensor(indices,
|
|
1342
|
+
device=sample_hidden_states.device)
|
|
1343
|
+
hidden_states = sample_hidden_states[indices]
|
|
1344
|
+
|
|
1345
|
+
spec_token_ids = self.drafter.propose(
|
|
1346
|
+
target_hidden_states=hidden_states,
|
|
1347
|
+
sampling_metadata=sampling_metadata,
|
|
1348
|
+
)
|
|
1349
|
+
elif self.speculative_config.use_eagle():
|
|
1350
|
+
assert isinstance(self.drafter, EagleProposer)
|
|
1351
|
+
# TODO(woosuk): Refactor the loop.
|
|
1352
|
+
next_token_ids: list[int] = []
|
|
1353
|
+
for i, token_ids in enumerate(valid_sampled_token_ids):
|
|
1354
|
+
if token_ids:
|
|
1355
|
+
# Common case.
|
|
1356
|
+
next_token_id = token_ids[-1]
|
|
1357
|
+
else:
|
|
1358
|
+
# Partial prefill (rare case).
|
|
1359
|
+
# Get the next token id from the request state.
|
|
1360
|
+
req_id = self.input_batch.req_ids[i]
|
|
1361
|
+
req_state = self.requests[req_id]
|
|
1362
|
+
seq_len = (req_state.num_computed_tokens +
|
|
1363
|
+
scheduler_output.num_scheduled_tokens[req_id])
|
|
1364
|
+
next_token_id = req_state.get_token_id(seq_len)
|
|
1365
|
+
next_token_ids.append(next_token_id)
|
|
1366
|
+
next_token_ids = torch.tensor(next_token_ids,
|
|
1367
|
+
dtype=torch.int32,
|
|
1368
|
+
device=self.device)
|
|
1369
|
+
# At this moment, we assume all eagle layers belong to the same KV
|
|
1370
|
+
# cache group, thus using the same attention metadata.
|
|
1371
|
+
eagle_attn_metadata = attn_metadata[
|
|
1372
|
+
self.drafter.attn_layer_names[0]]
|
|
1373
|
+
|
|
1374
|
+
# NOTE: deepseek_mtp uses MLA which does not have `block_table`
|
|
1375
|
+
if hasattr(eagle_attn_metadata, "block_table"):
|
|
1376
|
+
block_table = eagle_attn_metadata.block_table
|
|
1377
|
+
else:
|
|
1378
|
+
block_table = None
|
|
1379
|
+
|
|
1380
|
+
if spec_decode_metadata is None:
|
|
1381
|
+
# input_ids can be None for multimodal models.
|
|
1382
|
+
target_token_ids = self.input_ids[:num_scheduled_tokens]
|
|
1383
|
+
target_positions = positions[:num_scheduled_tokens]
|
|
1384
|
+
if self.use_aux_hidden_state_outputs:
|
|
1385
|
+
target_hidden_states = torch.cat(
|
|
1386
|
+
[h[:num_scheduled_tokens] for h in aux_hidden_states],
|
|
1387
|
+
dim=-1)
|
|
1388
|
+
else:
|
|
1389
|
+
target_hidden_states = hidden_states[:num_scheduled_tokens]
|
|
1390
|
+
target_slot_mapping = eagle_attn_metadata.slot_mapping
|
|
1391
|
+
cu_num_tokens = eagle_attn_metadata.query_start_loc
|
|
1392
|
+
else:
|
|
1393
|
+
# TODO(woosuk): Refactor this.
|
|
1394
|
+
num_draft_tokens = spec_decode_metadata.num_draft_tokens
|
|
1395
|
+
num_rejected_tokens = [
|
|
1396
|
+
n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0
|
|
1397
|
+
for i, n in enumerate(num_draft_tokens)
|
|
1398
|
+
]
|
|
1399
|
+
num_rejected_tokens_tensor = async_tensor_h2d(
|
|
1400
|
+
num_rejected_tokens,
|
|
1401
|
+
dtype=torch.int32,
|
|
1402
|
+
target_device=self.device,
|
|
1403
|
+
pin_memory=True)
|
|
1404
|
+
num_tokens = num_scheduled_tokens - sum(num_rejected_tokens)
|
|
1405
|
+
cu_num_tokens, token_indices = self.drafter.prepare_inputs(
|
|
1406
|
+
eagle_attn_metadata.query_start_loc,
|
|
1407
|
+
num_rejected_tokens_tensor,
|
|
1408
|
+
num_tokens,
|
|
1409
|
+
)
|
|
1410
|
+
target_token_ids = self.input_ids[token_indices]
|
|
1411
|
+
target_positions = positions[token_indices]
|
|
1412
|
+
if self.use_aux_hidden_state_outputs:
|
|
1413
|
+
target_hidden_states = torch.cat(
|
|
1414
|
+
[h[token_indices] for h in aux_hidden_states], dim=-1)
|
|
1415
|
+
else:
|
|
1416
|
+
target_hidden_states = hidden_states[token_indices]
|
|
1417
|
+
target_slot_mapping = eagle_attn_metadata.slot_mapping[
|
|
1418
|
+
token_indices]
|
|
1419
|
+
draft_token_ids = self.drafter.propose(
|
|
1420
|
+
target_token_ids=target_token_ids,
|
|
1421
|
+
target_positions=target_positions,
|
|
1422
|
+
target_hidden_states=target_hidden_states,
|
|
1423
|
+
target_slot_mapping=target_slot_mapping,
|
|
1424
|
+
next_token_ids=next_token_ids,
|
|
1425
|
+
cu_num_tokens=cu_num_tokens,
|
|
1426
|
+
block_table=block_table,
|
|
1427
|
+
sampling_metadata=sampling_metadata,
|
|
1428
|
+
)
|
|
1429
|
+
spec_token_ids = draft_token_ids.tolist()
|
|
1430
|
+
|
|
1431
|
+
# Clear KVConnector state after all KVs are generated.
|
|
1432
|
+
if has_kv_transfer_group():
|
|
1433
|
+
get_kv_transfer_group().clear_connector_metadata()
|
|
1434
|
+
|
|
1435
|
+
return ModelRunnerOutput(
|
|
1436
|
+
req_ids=self.input_batch.req_ids,
|
|
1437
|
+
req_id_to_index=self.input_batch.req_id_to_index,
|
|
1438
|
+
sampled_token_ids=valid_sampled_token_ids,
|
|
1439
|
+
spec_token_ids=spec_token_ids,
|
|
1440
|
+
logprobs=logprobs_lists,
|
|
1441
|
+
prompt_logprobs_dict=prompt_logprobs_dict,
|
|
1442
|
+
finished_sending=finished_sending,
|
|
1443
|
+
finished_recving=finished_recving,
|
|
1444
|
+
)
|
|
1445
|
+
|
|
1446
|
+
def kv_connector_no_forward(
|
|
1447
|
+
self, scheduler_output: "SchedulerOutput") -> ModelRunnerOutput:
|
|
1448
|
+
# KV send/recv even if no work to do.
|
|
1449
|
+
with set_forward_context(None, self.vllm_config):
|
|
1450
|
+
self.maybe_setup_kv_connector(scheduler_output)
|
|
1451
|
+
finished_sending, finished_recving = (
|
|
1452
|
+
self.get_finished_kv_transfers(scheduler_output))
|
|
1453
|
+
|
|
1454
|
+
if not finished_sending and not finished_recving:
|
|
1455
|
+
return EMPTY_MODEL_RUNNER_OUTPUT
|
|
1456
|
+
|
|
1457
|
+
output = copy.copy(EMPTY_MODEL_RUNNER_OUTPUT)
|
|
1458
|
+
output.finished_sending = finished_sending
|
|
1459
|
+
output.finished_recving = finished_recving
|
|
1460
|
+
return output
|
|
1461
|
+
|
|
1462
|
+
@staticmethod
|
|
1463
|
+
def maybe_setup_kv_connector(scheduler_output: "SchedulerOutput"):
|
|
1464
|
+
# Update KVConnector with the KVConnector metadata forward().
|
|
1465
|
+
if has_kv_transfer_group():
|
|
1466
|
+
kv_connector = get_kv_transfer_group()
|
|
1467
|
+
assert isinstance(kv_connector, KVConnectorBase_V1)
|
|
1468
|
+
assert scheduler_output.kv_connector_metadata is not None
|
|
1469
|
+
kv_connector.bind_connector_metadata(
|
|
1470
|
+
scheduler_output.kv_connector_metadata)
|
|
1471
|
+
|
|
1472
|
+
# Background KV cache transfers happen here.
|
|
1473
|
+
# These transfers are designed to be async and the requests
|
|
1474
|
+
# involved may be disjoint from the running requests.
|
|
1475
|
+
# Do this here to save a collective_rpc.
|
|
1476
|
+
kv_connector.start_load_kv(get_forward_context())
|
|
1477
|
+
|
|
1478
|
+
@staticmethod
|
|
1479
|
+
def maybe_wait_for_kv_save() -> None:
|
|
1480
|
+
if has_kv_transfer_group():
|
|
1481
|
+
get_kv_transfer_group().wait_for_save()
|
|
1482
|
+
|
|
1483
|
+
@staticmethod
|
|
1484
|
+
def get_finished_kv_transfers(
|
|
1485
|
+
scheduler_output: "SchedulerOutput",
|
|
1486
|
+
) -> tuple[Optional[set[str]], Optional[set[str]]]:
|
|
1487
|
+
if has_kv_transfer_group():
|
|
1488
|
+
return get_kv_transfer_group().get_finished(
|
|
1489
|
+
scheduler_output.finished_req_ids)
|
|
1490
|
+
return None, None
|
|
1491
|
+
|
|
1492
|
+
def generate_draft_token_ids(
|
|
1493
|
+
self,
|
|
1494
|
+
sampled_token_ids: list[list[int]],
|
|
1495
|
+
sampling_metadata: SamplingMetadata,
|
|
1496
|
+
) -> list[list[int]]:
|
|
1497
|
+
# TODO(woosuk): Optimize.
|
|
1498
|
+
draft_token_ids: list[list[int]] = []
|
|
1499
|
+
for i, sampled_ids in enumerate(sampled_token_ids):
|
|
1500
|
+
num_sampled_ids = len(sampled_ids)
|
|
1501
|
+
if not num_sampled_ids:
|
|
1502
|
+
# Skip speculative decoding.
|
|
1503
|
+
draft_token_ids.append([])
|
|
1504
|
+
continue
|
|
1505
|
+
|
|
1506
|
+
# Skip requests that require sampling parameters that are not
|
|
1507
|
+
# supported with speculative decoding.
|
|
1508
|
+
req_id = self.input_batch.req_ids[i]
|
|
1509
|
+
if not is_spec_decode_supported(req_id, self.input_batch):
|
|
1510
|
+
draft_token_ids.append([])
|
|
1511
|
+
continue
|
|
1512
|
+
|
|
1513
|
+
# Add sampled_token_ids to token_ids_cpu.
|
|
1514
|
+
start_idx = self.input_batch.num_tokens_no_spec[i]
|
|
1515
|
+
end_idx = start_idx + num_sampled_ids
|
|
1516
|
+
if end_idx >= self.max_model_len:
|
|
1517
|
+
# Skip requests that have already reached the max model length.
|
|
1518
|
+
draft_token_ids.append([])
|
|
1519
|
+
continue
|
|
1520
|
+
|
|
1521
|
+
self.input_batch.token_ids_cpu[i, start_idx:end_idx] = sampled_ids
|
|
1522
|
+
drafter_output = self.drafter.propose(
|
|
1523
|
+
self.input_batch.token_ids_cpu[i, :end_idx])
|
|
1524
|
+
if drafter_output is None or len(drafter_output) == 0:
|
|
1525
|
+
draft_token_ids.append([])
|
|
1526
|
+
else:
|
|
1527
|
+
draft_token_ids.append(drafter_output.tolist())
|
|
1528
|
+
return draft_token_ids
|
|
1529
|
+
|
|
1530
|
+
def load_model(self) -> None:
|
|
1531
|
+
logger.info("Starting to load model %s...", self.model_config.model)
|
|
1532
|
+
with DeviceMemoryProfiler() as m: # noqa: SIM117
|
|
1533
|
+
time_before_load = time.perf_counter()
|
|
1534
|
+
self.model = get_model(vllm_config=self.vllm_config)
|
|
1535
|
+
if self.lora_config:
|
|
1536
|
+
self.model = self.load_lora_model(self.model,
|
|
1537
|
+
self.model_config,
|
|
1538
|
+
self.scheduler_config,
|
|
1539
|
+
self.lora_config,
|
|
1540
|
+
self.device)
|
|
1541
|
+
if hasattr(self, "drafter"):
|
|
1542
|
+
logger.info("Loading drafter model...")
|
|
1543
|
+
self.drafter.load_model(self.model)
|
|
1544
|
+
if self.use_aux_hidden_state_outputs:
|
|
1545
|
+
self.model.set_aux_hidden_state_layers(
|
|
1546
|
+
self.model.get_eagle3_aux_hidden_state_layers())
|
|
1547
|
+
time_after_load = time.perf_counter()
|
|
1548
|
+
self.model_memory_usage = m.consumed_memory
|
|
1549
|
+
logger.info("Model loading took %.4f GiB and %.6f seconds",
|
|
1550
|
+
self.model_memory_usage / GiB_bytes,
|
|
1551
|
+
time_after_load - time_before_load)
|
|
1552
|
+
prepare_communication_buffer_for_model(self.model)
|
|
1553
|
+
|
|
1554
|
+
def save_tensorized_model(
|
|
1555
|
+
self,
|
|
1556
|
+
tensorizer_config: "TensorizerConfig",
|
|
1557
|
+
) -> None:
|
|
1558
|
+
TensorizerLoader.save_model(
|
|
1559
|
+
self.model,
|
|
1560
|
+
tensorizer_config=tensorizer_config,
|
|
1561
|
+
)
|
|
1562
|
+
|
|
1563
|
+
def _get_prompt_logprobs_dict(
|
|
1564
|
+
self,
|
|
1565
|
+
hidden_states: torch.Tensor,
|
|
1566
|
+
scheduler_output: "SchedulerOutput",
|
|
1567
|
+
) -> dict[str, Optional[LogprobsTensors]]:
|
|
1568
|
+
num_prompt_logprobs_dict = self.input_batch.num_prompt_logprobs
|
|
1569
|
+
if not num_prompt_logprobs_dict:
|
|
1570
|
+
return {}
|
|
1571
|
+
|
|
1572
|
+
in_progress_dict = self.input_batch.in_progress_prompt_logprobs_cpu
|
|
1573
|
+
prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
|
|
1574
|
+
|
|
1575
|
+
# Since prompt logprobs are a rare feature, prioritize simple,
|
|
1576
|
+
# maintainable loop over optimal performance.
|
|
1577
|
+
completed_prefill_reqs = []
|
|
1578
|
+
for req_id, num_prompt_logprobs in num_prompt_logprobs_dict.items():
|
|
1579
|
+
|
|
1580
|
+
num_tokens = scheduler_output.num_scheduled_tokens[req_id]
|
|
1581
|
+
|
|
1582
|
+
# Get metadata for this request.
|
|
1583
|
+
request = self.requests[req_id]
|
|
1584
|
+
num_prompt_tokens = len(request.prompt_token_ids)
|
|
1585
|
+
prompt_token_ids = torch.tensor(request.prompt_token_ids).to(
|
|
1586
|
+
self.device, non_blocking=True)
|
|
1587
|
+
|
|
1588
|
+
# Set up target LogprobsTensors object.
|
|
1589
|
+
logprobs_tensors = in_progress_dict.get(req_id)
|
|
1590
|
+
if not logprobs_tensors:
|
|
1591
|
+
# Create empty logprobs CPU tensors for the entire prompt.
|
|
1592
|
+
# If chunked, we'll copy in slice by slice.
|
|
1593
|
+
logprobs_tensors = LogprobsTensors.empty_cpu(
|
|
1594
|
+
num_prompt_tokens - 1, num_prompt_logprobs + 1)
|
|
1595
|
+
in_progress_dict[req_id] = logprobs_tensors
|
|
1596
|
+
|
|
1597
|
+
# Determine number of logits to retrieve.
|
|
1598
|
+
start_idx = request.num_computed_tokens
|
|
1599
|
+
start_tok = start_idx + 1
|
|
1600
|
+
num_remaining_tokens = num_prompt_tokens - start_tok
|
|
1601
|
+
if num_tokens <= num_remaining_tokens:
|
|
1602
|
+
# This is a chunk, more tokens remain.
|
|
1603
|
+
# In the == case, there are no more prompt logprobs to produce
|
|
1604
|
+
# but we want to defer returning them to the next step where we
|
|
1605
|
+
# have new generated tokens to return.
|
|
1606
|
+
num_logits = num_tokens
|
|
1607
|
+
else:
|
|
1608
|
+
# This is the last chunk of prompt tokens to return.
|
|
1609
|
+
num_logits = num_remaining_tokens
|
|
1610
|
+
completed_prefill_reqs.append(req_id)
|
|
1611
|
+
                prompt_logprobs_dict[req_id] = logprobs_tensors

            if num_logits <= 0:
                # This can happen for the final chunk if we prefilled exactly
                # (num_prompt_tokens - 1) tokens for this request in the prior
                # step. There are no more prompt logprobs to produce.
                continue

            # Get the logits corresponding to this req's prompt tokens.
            # If this is a partial request (i.e. chunked prefill),
            # then there is prompt logprob generated for each index.
            req_idx = self.input_batch.req_id_to_index[req_id]
            offset = self.query_start_loc_np[req_idx].item()
            prompt_hidden_states = hidden_states[offset:offset + num_logits]
            logits = self.model.compute_logits(prompt_hidden_states, None)

            # Get the "target" tokens for each index. For prompt at index i,
            # the token at prompt index i+1 is the "sampled" token we want
            # to gather the logprob for.
            tgt_token_ids = prompt_token_ids[start_tok:start_tok + num_logits]

            # Compute prompt logprobs.
            logprobs = self.sampler.compute_logprobs(logits)
            token_ids, logprobs, ranks = self.sampler.gather_logprobs(
                logprobs, num_prompt_logprobs, tgt_token_ids)

            # Transfer GPU->CPU async.
            chunk_slice = slice(start_idx, start_idx + num_logits)
            logprobs_tensors.logprob_token_ids[chunk_slice].copy_(
                token_ids, non_blocking=True)
            logprobs_tensors.logprobs[chunk_slice].copy_(logprobs,
                                                         non_blocking=True)
            logprobs_tensors.selected_token_ranks[chunk_slice].copy_(
                ranks, non_blocking=True)

        # Remove requests that have completed prefill from the batch
        # num_prompt_logprobs_dict.
        for req_id in completed_prefill_reqs:
            del num_prompt_logprobs_dict[req_id]
            del in_progress_dict[req_id]

        # Must synchronize the non-blocking GPU->CPU transfers.
        if prompt_logprobs_dict:
            torch.cuda.synchronize()

        return prompt_logprobs_dict

    @torch.inference_mode()
    def _dummy_run(
        self,
        num_tokens: int,
        skip_attn: bool = True,
    ) -> torch.Tensor:

        # Set num_scheduled_tokens based on num_tokens and max_num_seqs
        # for dummy run with LoRA so that the num_reqs collectively
        # has num_tokens in total.
        assert num_tokens <= self.scheduler_config.max_num_batched_tokens
        max_num_reqs = self.scheduler_config.max_num_seqs
        num_reqs = max_num_reqs if num_tokens >= max_num_reqs else num_tokens
        min_tokens_per_req = num_tokens // num_reqs
        num_scheduled_tokens_list = [min_tokens_per_req] * num_reqs
        num_scheduled_tokens_list[-1] += num_tokens % num_reqs
        assert sum(num_scheduled_tokens_list) == num_tokens
        assert len(num_scheduled_tokens_list) == num_reqs
        num_scheduled_tokens = np.array(num_scheduled_tokens_list,
                                        dtype=np.int32)

        if skip_attn:
            attn_metadata: Optional[dict[str, FlashAttentionMetadata]] = None
        else:
            query_start_loc = self.query_start_loc[:num_reqs + 1]
            seq_lens = self.seq_lens[:num_reqs]

            common_attn_metadata = CommonAttentionMetadata(
                query_start_loc=query_start_loc, seq_lens=seq_lens)

            attn_metadata = {}
            for kv_cache_group_id, kv_cache_group_spec in enumerate(
                    self.kv_cache_config.kv_cache_groups):
                attn_metadata_i = (
                    self.attn_metadata_builders[kv_cache_group_id].build(
                        num_reqs=num_tokens,
                        num_actual_tokens=num_tokens,
                        max_query_len=num_tokens,
                        common_prefix_len=0,
                        common_attn_metadata=common_attn_metadata,
                    ))
                for layer_name in kv_cache_group_spec.layer_names:
                    attn_metadata[layer_name] = attn_metadata_i

        with self.maybe_dummy_run_with_lora(self.lora_config,
                                            num_scheduled_tokens):
            model = self.model
            if self.is_multimodal_model:
                input_ids = None
                inputs_embeds = self.inputs_embeds[:num_tokens]
            else:
                input_ids = self.input_ids[:num_tokens]
                inputs_embeds = None
            if self.uses_mrope:
                positions = self.mrope_positions[:, :num_tokens]
            else:
                positions = self.positions[:num_tokens]

            if get_pp_group().is_first_rank:
                intermediate_tensors = None
            else:
                if self.intermediate_tensors is None:
                    self.intermediate_tensors = (
                        self.model.make_empty_intermediate_tensors(
                            batch_size=self.max_num_tokens,
                            dtype=self.model_config.dtype,
                            device=self.device))

                intermediate_tensors = self.sync_and_slice_intermediate_tensors(
                    num_tokens, None, False)

            with set_forward_context(attn_metadata,
                                     self.vllm_config,
                                     num_tokens=num_tokens):
                outputs = model(
                    input_ids=input_ids,
                    positions=positions,
                    intermediate_tensors=intermediate_tensors,
                    inputs_embeds=inputs_embeds,
                )
            if self.use_aux_hidden_state_outputs:
                hidden_states, _ = outputs
            else:
                hidden_states = outputs

        if self.use_spec_decode and self.speculative_config.use_eagle():
            assert isinstance(self.drafter, EagleProposer)
            self.drafter.dummy_run(num_tokens)

        logit_indices = np.cumsum(num_scheduled_tokens) - 1
        return hidden_states[logit_indices]

    @torch.inference_mode()
    def _dummy_sampler_run(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        # The dummy hidden states may contain special values,
        # like `inf` or `nan`.
        # To avoid breaking the sampler, we use a random tensor here instead.
        hidden_states = torch.rand_like(hidden_states)

        logits = self.model.compute_logits(hidden_states, None)
        num_reqs = logits.size(0)

        dummy_tensors = lambda v: torch.full(
            (num_reqs, ), v, device=self.device)

        dummy_metadata = SamplingMetadata(
            temperature=dummy_tensors(0.5),
            all_greedy=False,
            all_random=False,
            top_p=dummy_tensors(0.9),
            top_k=dummy_tensors(logits.size(1) - 1),
            min_p=None,
            generators={},
            max_num_logprobs=None,
            no_penalties=True,
            prompt_token_ids=None,
            frequency_penalties=dummy_tensors(0.1),
            presence_penalties=dummy_tensors(0.1),
            repetition_penalties=dummy_tensors(0.1),
            output_token_ids=[[] for _ in range(num_reqs)],
            min_tokens={},
            logit_bias=[None for _ in range(num_reqs)],
            allowed_token_ids_mask=None,
            bad_words_token_ids={},
        )
        try:
            sampler_output = self.sampler(logits=logits,
                                          sampling_metadata=dummy_metadata)
        except RuntimeError as e:
            if 'out of memory' in str(e):
                raise RuntimeError(
                    "CUDA out of memory occurred when warming up sampler with "
                    f"{num_reqs} dummy requests. Please try lowering "
                    "`max_num_seqs` or `gpu_memory_utilization` when "
                    "initializing the engine.") from e
            else:
                raise e
        if self.use_spec_decode:
            draft_token_ids = [[0] for _ in range(num_reqs)]
            dummy_spec_decode_metadata = SpecDecodeMetadata.make_dummy(
                draft_token_ids, self.device)

            num_tokens = sum(len(ids) for ids in draft_token_ids)
            # draft_probs = torch.randn(
            #     num_tokens, logits.shape[-1], device=self.device,
            #     dtype=logits.dtype)
            draft_probs = None
            target_logits = torch.randn(num_tokens,
                                        logits.shape[-1],
                                        device=self.device,
                                        dtype=logits.dtype)
            # NOTE(woosuk): Here, we should use int32 because the sampler uses
            # int32 for bonus_token_ids. If the dtype mismatches, re-compilation
            # will occur at runtime.
            bonus_token_ids = torch.zeros(num_reqs,
                                          device=self.device,
                                          dtype=torch.int32)
            self.rejection_sampler(
                dummy_spec_decode_metadata,
                draft_probs,
                target_logits,
                bonus_token_ids,
                dummy_metadata,
            )
        return sampler_output

    def profile_run(self) -> None:
        # Profile with multimodal encoder & encoder cache.
        # TODO: handle encoder-decoder models once we support them.
        if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
                and self.encoder_cache_size > 0):

            # NOTE: Currently model is profiled with a single non-text
            # modality with the max possible input tokens even when
            # it supports multiple.
            max_tokens_by_modality_dict = self.mm_registry \
                .get_max_tokens_per_item_by_nonzero_modality(self.model_config)
            dummy_data_modality, max_tokens_per_mm_item = max(
                max_tokens_by_modality_dict.items(), key=lambda item: item[1])

            # Check how many items of this modality can be supported by
            # the encoder budget.
            encoder_budget = min(self.max_num_encoder_input_tokens,
                                 self.encoder_cache_size)

            max_num_mm_items_encoder_budget = cdiv(encoder_budget,
                                                   max_tokens_per_mm_item)

            # Check how many items of this modality can be supported by
            # the decoder budget.
            max_mm_items_per_req = self.mm_registry.get_mm_limits_per_prompt(
                self.model_config)[dummy_data_modality]

            # NOTE: We do not consider max_num_batched_tokens on purpose
            # because the multimodal embeddings can be generated in advance
            # and chunked prefilled.
            max_num_mm_items_decoder_budget = self.max_num_reqs * \
                max_mm_items_per_req

            max_num_mm_items = min(max_num_mm_items_encoder_budget,
                                   max_num_mm_items_decoder_budget)

            logger.info(
                "Encoder cache will be initialized with a budget of %s tokens,"
                " and profiled with %s %s items of the maximum feature size.",
                encoder_budget, max_num_mm_items, dummy_data_modality)

            # Create dummy batch of multimodal inputs.
            dummy_mm_kwargs = self.mm_registry.get_decoder_dummy_data(
                model_config=self.model_config,
                seq_len=self.max_num_tokens,
                mm_counts={
                    dummy_data_modality: 1
                },
            ).multi_modal_data

            batched_dummy_mm_inputs = MultiModalKwargs.batch(
                [dummy_mm_kwargs] * max_num_mm_items)
            batched_dummy_mm_inputs = MultiModalKwargs.as_kwargs(
                batched_dummy_mm_inputs,
                dtype=self.model_config.dtype,
                device=self.device,
            )

            # Run multimodal encoder.
            dummy_encoder_outputs = self.model.get_multimodal_embeddings(
                **batched_dummy_mm_inputs)

            sanity_check_mm_encoder_outputs(
                dummy_encoder_outputs,
                expected_num_items=max_num_mm_items,
            )

            # Cache the dummy encoder outputs.
            self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))

        hidden_states = self._dummy_run(self.max_num_tokens)
        if get_pp_group().is_last_rank:
            sampler_output = self._dummy_sampler_run(hidden_states)
        else:
            sampler_output = None
        torch.cuda.synchronize()
        del hidden_states, sampler_output
        self.encoder_cache.clear()
        gc.collect()

    def capture_model(self) -> None:
        if not self.use_cuda_graph:
            logger.warning(
                "Skipping CUDA graph capture. Please add "
                "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE)
            return

        start_time = time.perf_counter()
        start_free_gpu_memory = torch.cuda.mem_get_info()[0]

        # Trigger CUDA graph capture for specific shapes.
        # Capture the large shapes first so that the smaller shapes
        # can reuse the memory pool allocated for the large shapes.
        with graph_capture(device=self.device):
            skip_attn = not self.vllm_config.compilation_config.full_cuda_graph
            for num_tokens in reversed(self.cudagraph_batch_sizes):
                for _ in range(self.vllm_config.compilation_config.
                               cudagraph_num_of_warmups):
                    self._dummy_run(num_tokens, skip_attn=skip_attn)
                self._dummy_run(num_tokens, skip_attn=skip_attn)

        end_time = time.perf_counter()
        end_free_gpu_memory = torch.cuda.mem_get_info()[0]
        elapsed_time = end_time - start_time
        cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory
        # This usually takes 5~20 seconds.
        logger.info("Graph capturing finished in %.0f secs, took %.2f GiB",
                    elapsed_time, cuda_graph_size / (1 << 30))

    def initialize_attn_backend(self, kv_cache_config: KVCacheConfig) -> None:
        """
        Initialize the attention backends and attention metadata builders.
        """
        assert len(self.attn_backends) == 0 and len(
            self.attn_metadata_builders
        ) == 0, "Attention backends are already initialized"
        for i, kv_cache_group_spec in enumerate(
                kv_cache_config.kv_cache_groups):
            kv_cache_spec = kv_cache_group_spec.kv_cache_spec
            if not isinstance(kv_cache_spec, AttentionSpec):
                raise NotImplementedError(
                    "Only AttentionSpec is supported for now.")
            attn_backend_i = get_attn_backend(
                kv_cache_spec.head_size,
                self.dtype,
                kv_cache_spec.dtype,
                kv_cache_spec.block_size,
                self.model_config.is_attention_free,
                use_mla=kv_cache_spec.use_mla,
            )
            if attn_backend_i is None:
                error_msg = (
                    f"Error with get_attn_backend: {kv_cache_spec.head_size=}, "
                    f"{self.dtype=}, {kv_cache_spec.dtype=}, "
                    f"{kv_cache_spec.block_size=}, "
                    f"{self.model_config.is_attention_free=}, "
                    f"{kv_cache_spec.use_mla=}")
                logger.error(error_msg)
                raise NotImplementedError(
                    "Non-Attention backend is not supported by V1 "
                    "GPUModelRunner.")

            if self.vllm_config.compilation_config.full_cuda_graph:
                attn_backend_name = attn_backend_i.__name__
                flash_attn_version = get_flash_attn_version()
                if attn_backend_name != "FlashAttentionBackend" or \
                        flash_attn_version != 3:
                    raise ValueError(
                        f"full_cuda_graph is only supported with "
                        f"FA3. Current attention backend is "
                        f"{attn_backend_name}, FlashAttention version is "
                        f"{flash_attn_version}.")

            block_table_i = self.input_batch.block_table[i]
            attn_metadata_builder_i = attn_backend_i.get_builder_cls()(
                weakref.proxy(self), kv_cache_spec, block_table_i)
            self.attn_backends.append(attn_backend_i)
            self.attn_metadata_builders.append(attn_metadata_builder_i)

    def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
        """
        Initialize KV cache based on `kv_cache_config`.
        Args:
            kv_cache_config: Configuration for the KV cache, including the KV
                cache size of each layer
        """
        if len(kv_cache_config.kv_cache_groups) > 1:
            raise NotImplementedError(
                "Hybrid models with more than one KV cache type are not "
                "supported yet.")
        self.kv_cache_config = kv_cache_config
        self.initialize_attn_backend(kv_cache_config)

        kv_caches: dict[str, torch.Tensor] = {}

        for i, kv_cache_group in enumerate(kv_cache_config.kv_cache_groups):
            kv_cache_spec = kv_cache_group.kv_cache_spec
            for layer_name in kv_cache_group.layer_names:
                tensor_config = kv_cache_config.tensors[layer_name]
                assert tensor_config.size % kv_cache_spec.page_size_bytes == 0
                num_blocks = tensor_config.size // kv_cache_spec.page_size_bytes
                # `num_blocks` is the number of blocks the model runner can use.
                # `kv_cache_config.num_blocks` is the number of blocks that
                # KVCacheManager may allocate.
                # Since different GPUs may have different number of layers and
                # different memory capacities, `num_blocks` can be different on
                # different GPUs, and `kv_cache_config.num_blocks` is set to
                # the min of all `num_blocks`. Verify it here.
                assert num_blocks >= kv_cache_config.num_blocks
                if isinstance(kv_cache_spec, AttentionSpec):
                    kv_cache_shape = self.attn_backends[i].get_kv_cache_shape(
                        num_blocks, kv_cache_spec.block_size,
                        kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
                    dtype = kv_cache_spec.dtype
                    kv_caches[layer_name] = torch.zeros(kv_cache_shape,
                                                        dtype=dtype,
                                                        device=self.device)
                else:
                    # TODO: add new branches when introducing more types of
                    # KV cache specs.
                    raise ValueError("Unknown KV cache spec type.")

        if self.speculative_config and self.speculative_config.use_eagle():
            assert isinstance(self.drafter, EagleProposer)
            # validate all draft model layers belong to the same kv cache
            # group
            self.drafter.validate_same_kv_cache_group(kv_cache_config)

        bind_kv_cache(
            kv_caches,
            self.vllm_config.compilation_config.static_forward_context,
            self.kv_caches)

        if has_kv_transfer_group():
            get_kv_transfer_group().register_kv_caches(kv_caches)

    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
        """
        Generates the KVCacheSpec by parsing the kv cache format from each
        Attention module in the static forward context.
        Returns:
            KVCacheSpec: A dictionary mapping layer names to their KV cache
            format. Layers that do not need KV cache are not included.
        """

        layers = get_layers_from_vllm_config(self.vllm_config, Attention)
        block_size = self.vllm_config.cache_config.block_size
        use_mla = self.vllm_config.model_config.use_mla
        kv_cache_spec: dict[str, KVCacheSpec] = {}
        for layer_name, attn_module in layers.items():
            # TODO: Support other attention modules, e.g., cross-attention
            if attn_module.attn_type == AttentionType.DECODER:
                if attn_module.sliding_window is not None:
                    kv_cache_spec[layer_name] = SlidingWindowSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        sliding_window=attn_module.sliding_window,
                        use_mla=use_mla)
                else:
                    kv_cache_spec[layer_name] = FullAttentionSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        use_mla=use_mla)
            elif attn_module.attn_type in (AttentionType.ENCODER,
                                           AttentionType.ENCODER_ONLY):
                # encoder-only attention does not need KV cache.
                continue
            elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
                raise NotImplementedError
            else:
                raise ValueError(
                    f"Unknown attention type: {attn_module.attn_type}")

        return kv_cache_spec