vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
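Every entry below is added with no removals, so the listing is effectively the full file manifest of a first release of this CPU (AMX/BF16) build. For orientation, here is a minimal, hedged sketch of how a wheel like this is typically exercised after `pip install <wheel file>`: the package installs under the standard `vllm` import name (note `vllm/__init__.py` in the listing), and the offline-inference entry points are `LLM` and `SamplingParams`. The model name in the sketch is only an illustrative assumption, not something taken from this manifest.

```python
# Minimal offline-inference sketch against the vllm package shipped in this wheel.
# "facebook/opt-125m" is an illustrative placeholder; any model supported by the
# CPU backend would work.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")  # loads the model on the installed backend
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)

outputs = llm.generate(["Hello, my name is"], params)
for out in outputs:
    print(out.prompt, "->", out.outputs[0].text)
```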
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +53 -0
- vllm/_custom_ops.py +1828 -0
- vllm/_ipex_ops.py +244 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +115 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +308 -0
- vllm/attention/backends/blocksparse_attn.py +461 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
- vllm/attention/backends/flash_attn.py +1003 -0
- vllm/attention/backends/flashinfer.py +1104 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +313 -0
- vllm/attention/backends/ipex_attn.py +398 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1385 -0
- vllm/attention/backends/pallas.py +351 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +975 -0
- vllm/attention/backends/torch_sdpa.py +703 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +802 -0
- vllm/attention/layer.py +468 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +906 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +979 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +334 -0
- vllm/attention/selector.py +187 -0
- vllm/attention/utils/fa_utils.py +55 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1185 -0
- vllm/benchmarks/endpoint_request_func.py +381 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1135 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +563 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +544 -0
- vllm/compilation/counter.py +38 -0
- vllm/compilation/cuda_piecewise_backend.py +214 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +618 -0
- vllm/compilation/fx_utils.py +62 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +137 -0
- vllm/compilation/pass_manager.py +78 -0
- vllm/compilation/sequence_parallelism.py +268 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +67 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4746 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +521 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +135 -0
- vllm/core/placeholder_block_space_manager.py +100 -0
- vllm/core/scheduler.py +2093 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +176 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1296 -0
- vllm/distributed/tpu_distributed_utils.py +177 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1708 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2097 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +317 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1299 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +39 -0
- vllm/entrypoints/cli/benchmark/latency.py +30 -0
- vllm/entrypoints/cli/benchmark/main.py +54 -0
- vllm/entrypoints/cli/benchmark/serve.py +30 -0
- vllm/entrypoints/cli/benchmark/throughput.py +30 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +65 -0
- vllm/entrypoints/cli/openai.py +205 -0
- vllm/entrypoints/cli/run_batch.py +62 -0
- vllm/entrypoints/cli/serve.py +328 -0
- vllm/entrypoints/cli/types.py +25 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1544 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1387 -0
- vllm/entrypoints/openai/cli_args.py +315 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +1913 -0
- vllm/entrypoints/openai/run_batch.py +463 -0
- vllm/entrypoints/openai/serving_chat.py +1221 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +592 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +986 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +232 -0
- vllm/entrypoints/openai/serving_score.py +433 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +424 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +944 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +179 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +909 -0
- vllm/inputs/registry.py +237 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +85 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +259 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +152 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +369 -0
- vllm/model_executor/layers/fused_moe/__init__.py +54 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
- vllm/model_executor/layers/fused_moe/layer.py +1535 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/utils.py +98 -0
- vllm/model_executor/layers/layernorm.py +288 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1524 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +350 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +194 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +151 -0
- vllm/model_executor/layers/quantization/bitblas.py +461 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +196 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +906 -0
- vllm/model_executor/layers/quantization/gguf.py +565 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +261 -0
- vllm/model_executor/layers/quantization/modelopt.py +737 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +441 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +161 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +1862 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +95 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +76 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +600 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
- vllm/model_executor/model_loader/tpu.py +112 -0
- vllm/model_executor/model_loader/utils.py +302 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +28 -0
- vllm/model_executor/models/adapters.py +248 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +657 -0
- vllm/model_executor/models/aya_vision.py +466 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +543 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +523 -0
- vllm/model_executor/models/bert_with_rope.py +769 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +718 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1136 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +472 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +269 -0
- vllm/model_executor/models/deepseek_v2.py +843 -0
- vllm/model_executor/models/deepseek_vl2.py +648 -0
- vllm/model_executor/models/eagle.py +260 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/florence2.py +1103 -0
- vllm/model_executor/models/fuyu.py +389 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +533 -0
- vllm/model_executor/models/gemma3_mm.py +709 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4v.py +648 -0
- vllm/model_executor/models/gpt2.py +328 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +779 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +586 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +546 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +776 -0
- vllm/model_executor/models/interfaces.py +572 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1418 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +532 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +866 -0
- vllm/model_executor/models/llava_next.py +586 -0
- vllm/model_executor/models/llava_next_video.py +471 -0
- vllm/model_executor/models/llava_onevision.py +956 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +308 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +759 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +364 -0
- vllm/model_executor/models/mistral3.py +604 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1624 -0
- vllm/model_executor/models/mllama4.py +938 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +331 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1568 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +573 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +567 -0
- vllm/model_executor/models/paligemma.py +398 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +723 -0
- vllm/model_executor/models/phi4mm.py +1246 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +665 -0
- vllm/model_executor/models/pixtral.py +1316 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +497 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
- vllm/model_executor/models/qwen2_5_vl.py +1166 -0
- vllm/model_executor/models/qwen2_audio.py +410 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +132 -0
- vllm/model_executor/models/qwen2_vl.py +1405 -0
- vllm/model_executor/models/qwen3.py +321 -0
- vllm/model_executor/models/qwen3_moe.py +535 -0
- vllm/model_executor/models/qwen_vl.py +785 -0
- vllm/model_executor/models/registry.py +622 -0
- vllm/model_executor/models/roberta.py +276 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +643 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +508 -0
- vllm/model_executor/models/ultravox.py +656 -0
- vllm/model_executor/models/utils.py +731 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +747 -0
- vllm/model_executor/models/zamba2.py +1009 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +77 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +106 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +118 -0
- vllm/multimodal/image.py +97 -0
- vllm/multimodal/inputs.py +876 -0
- vllm/multimodal/parse.py +461 -0
- vllm/multimodal/processing.py +1895 -0
- vllm/multimodal/profiling.py +258 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +436 -0
- vllm/multimodal/video.py +198 -0
- vllm/outputs.py +512 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +266 -0
- vllm/platforms/cuda.py +526 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +538 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +435 -0
- vllm/platforms/tpu.py +216 -0
- vllm/platforms/xpu.py +156 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +54 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +887 -0
- vllm/transformers_utils/configs/__init__.py +61 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/h2ovl.py +16 -0
- vllm/transformers_utils/configs/internvl.py +54 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +258 -0
- vllm/transformers_utils/configs/nvlm_d.py +15 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +50 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +256 -0
- vllm/utils.py +2910 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +163 -0
- vllm/v1/attention/backends/flash_attn.py +869 -0
- vllm/v1/attention/backends/flashinfer.py +651 -0
- vllm/v1/attention/backends/flex_attention.py +477 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +931 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
- vllm/v1/attention/backends/mla/flashmla.py +152 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
- vllm/v1/attention/backends/mla/triton_mla.py +120 -0
- vllm/v1/attention/backends/pallas.py +240 -0
- vllm/v1/attention/backends/triton_attn.py +285 -0
- vllm/v1/attention/backends/utils.py +52 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +150 -0
- vllm/v1/core/kv_cache_coordinator.py +363 -0
- vllm/v1/core/kv_cache_manager.py +392 -0
- vllm/v1/core/kv_cache_utils.py +996 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +154 -0
- vllm/v1/core/sched/scheduler.py +1044 -0
- vllm/v1/core/sched/utils.py +23 -0
- vllm/v1/core/single_type_kv_cache_manager.py +403 -0
- vllm/v1/engine/__init__.py +173 -0
- vllm/v1/engine/async_llm.py +558 -0
- vllm/v1/engine/coordinator.py +253 -0
- vllm/v1/engine/core.py +961 -0
- vllm/v1/engine/core_client.py +1129 -0
- vllm/v1/engine/detokenizer.py +261 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +317 -0
- vllm/v1/engine/logprobs.py +199 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +428 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +407 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +537 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +194 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +523 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +239 -0
- vllm/v1/outputs.py +116 -0
- vllm/v1/request.py +193 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +59 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +286 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +432 -0
- vllm/v1/spec_decode/medusa.py +62 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +46 -0
- vllm/v1/structured_output/__init__.py +222 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +743 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +86 -0
- vllm/v1/worker/cpu_worker.py +152 -0
- vllm/v1/worker/gpu_input_batch.py +681 -0
- vllm/v1/worker/gpu_model_runner.py +2320 -0
- vllm/v1/worker/gpu_worker.py +393 -0
- vllm/v1/worker/lora_model_runner_mixin.py +173 -0
- vllm/v1/worker/tpu_model_runner.py +1673 -0
- vllm/v1/worker/tpu_worker.py +299 -0
- vllm/v1/worker/utils.py +111 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +450 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2178 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
|
|
4
|
+
import dataclasses
|
|
5
|
+
import gc
|
|
6
|
+
import inspect
|
|
7
|
+
import itertools
|
|
8
|
+
import time
|
|
9
|
+
import weakref
|
|
10
|
+
from contextlib import contextmanager
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set,
|
|
13
|
+
Tuple, Type, TypeVar, Union)
|
|
14
|
+
|
|
15
|
+
import numpy as np
|
|
16
|
+
import torch
|
|
17
|
+
import torch.distributed
|
|
18
|
+
import torch.nn as nn
|
|
19
|
+
from tqdm.auto import tqdm
|
|
20
|
+
|
|
21
|
+
import vllm.envs as envs
|
|
22
|
+
from vllm.attention import AttentionMetadata, get_attn_backend
|
|
23
|
+
from vllm.attention.backends.abstract import AttentionState
|
|
24
|
+
from vllm.attention.backends.utils import CommonAttentionState
|
|
25
|
+
from vllm.config import CompilationLevel, VllmConfig
|
|
26
|
+
from vllm.core.scheduler import SchedulerOutputs
|
|
27
|
+
from vllm.distributed import broadcast_tensor_dict, get_pp_group
|
|
28
|
+
from vllm.distributed.kv_transfer import get_kv_transfer_group
|
|
29
|
+
from vllm.distributed.parallel_state import (get_tensor_model_parallel_rank,
|
|
30
|
+
graph_capture)
|
|
31
|
+
from vllm.forward_context import get_forward_context, set_forward_context
|
|
32
|
+
from vllm.inputs import INPUT_REGISTRY, InputRegistry
|
|
33
|
+
from vllm.logger import init_logger
|
|
34
|
+
from vllm.lora.layers import LoRAMapping
|
|
35
|
+
from vllm.lora.request import LoRARequest
|
|
36
|
+
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
|
|
37
|
+
from vllm.model_executor import SamplingMetadata, SamplingMetadataCache
|
|
38
|
+
from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
|
|
39
|
+
from vllm.model_executor.layers.sampler import (Sampler, SamplerOutput,
|
|
40
|
+
get_sampler)
|
|
41
|
+
from vllm.model_executor.model_loader import get_model
|
|
42
|
+
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
|
|
43
|
+
from vllm.model_executor.models import supports_lora, supports_multimodal
|
|
44
|
+
from vllm.model_executor.models.utils import set_cpu_offload_max_bytes
|
|
45
|
+
from vllm.multimodal import (MULTIMODAL_REGISTRY, BatchedTensorInputs,
|
|
46
|
+
MultiModalKwargs, MultiModalPlaceholderMap,
|
|
47
|
+
MultiModalRegistry)
|
|
48
|
+
from vllm.prompt_adapter.layers import PromptAdapterMapping
|
|
49
|
+
from vllm.prompt_adapter.request import PromptAdapterRequest
|
|
50
|
+
from vllm.prompt_adapter.worker_manager import (
|
|
51
|
+
LRUCacheWorkerPromptAdapterManager)
|
|
52
|
+
from vllm.sampling_params import SamplingParams
|
|
53
|
+
from vllm.sequence import IntermediateTensors, SequenceGroupMetadata
|
|
54
|
+
from vllm.utils import (DeviceMemoryProfiler, GiB_bytes, PyObjectCache,
|
|
55
|
+
async_tensor_h2d, flatten_2d_lists,
|
|
56
|
+
is_pin_memory_available, supports_dynamo,
|
|
57
|
+
weak_ref_tensor)
|
|
58
|
+
from vllm.worker.model_runner_base import (
|
|
59
|
+
InputProcessingError, ModelRunnerBase, ModelRunnerInputBase,
|
|
60
|
+
ModelRunnerInputBuilderBase, _add_attn_metadata_broadcastable_dict,
|
|
61
|
+
_add_sampling_metadata_broadcastable_dict,
|
|
62
|
+
_init_attn_metadata_from_tensor_dict,
|
|
63
|
+
_init_sampling_metadata_from_tensor_dict)
|
|
64
|
+
|
|
65
|
+
if TYPE_CHECKING:
|
|
66
|
+
from vllm.attention.backends.abstract import AttentionBackend
|
|
67
|
+
|
|
68
|
+
logger = init_logger(__name__)
|
|
69
|
+
|
|
70
|
+
LORA_WARMUP_RANK = 8
|
|
71
|
+
|
|
72
|
+
_NUM_WARMUP_ITERS = 2
|
|
73
|
+
|
|
74
|
+
TModelInputForGPU = TypeVar('TModelInputForGPU', bound="ModelInputForGPU")
|
|
75
|
+
|
|
76
|
+
# For now, bump up cache limits for recompilations during CUDA graph warmups.
|
|
77
|
+
torch._dynamo.config.cache_size_limit = 128
|
|
78
|
+
torch._dynamo.config.accumulated_cache_size_limit = 128
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@dataclass(frozen=True)
|
|
82
|
+
class ModelInputForGPU(ModelRunnerInputBase):
|
|
83
|
+
"""
|
|
84
|
+
This base class contains metadata needed for the base model forward pass
|
|
85
|
+
but not metadata for possible additional steps, e.g., sampling. Model
|
|
86
|
+
runners that run additional steps should subclass this method to add
|
|
87
|
+
additional fields.
|
|
88
|
+
"""
|
|
89
|
+
input_tokens: Optional[torch.Tensor] = None
|
|
90
|
+
inputs_embeds: Optional[torch.Tensor] = None
|
|
91
|
+
input_positions: Optional[torch.Tensor] = None
|
|
92
|
+
token_types: Optional[torch.Tensor] = None
|
|
93
|
+
seq_lens: Optional[List[int]] = None
|
|
94
|
+
query_lens: Optional[List[int]] = None
|
|
95
|
+
lora_mapping: Optional["LoRAMapping"] = None
|
|
96
|
+
lora_requests: Optional[Set[LoRARequest]] = None
|
|
97
|
+
attn_metadata: Optional["AttentionMetadata"] = None
|
|
98
|
+
prompt_adapter_mapping: Optional[PromptAdapterMapping] = None
|
|
99
|
+
prompt_adapter_requests: Optional[Set[PromptAdapterRequest]] = None
|
|
100
|
+
multi_modal_kwargs: Optional[BatchedTensorInputs] = None
|
|
101
|
+
request_ids_to_seq_ids: Optional[Dict[str, List[int]]] = None
|
|
102
|
+
finished_requests_ids: Optional[List[str]] = None
|
|
103
|
+
virtual_engine: int = 0
|
|
104
|
+
async_callback: Optional[Callable] = None
|
|
105
|
+
scheduler_outputs: Optional[SchedulerOutputs] = None
|
|
106
|
+
previous_hidden_states: Optional[torch.Tensor] = None
|
|
107
|
+
|
|
108
|
+
def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
|
|
109
|
+
tensor_dict = {
|
|
110
|
+
"input_tokens": self.input_tokens,
|
|
111
|
+
"inputs_embeds": self.inputs_embeds,
|
|
112
|
+
"input_positions": self.input_positions,
|
|
113
|
+
"lora_requests": self.lora_requests,
|
|
114
|
+
"lora_mapping": self.lora_mapping,
|
|
115
|
+
"multi_modal_kwargs": self.multi_modal_kwargs,
|
|
116
|
+
"prompt_adapter_mapping": self.prompt_adapter_mapping,
|
|
117
|
+
"prompt_adapter_requests": self.prompt_adapter_requests,
|
|
118
|
+
"virtual_engine": self.virtual_engine,
|
|
119
|
+
"request_ids_to_seq_ids": self.request_ids_to_seq_ids,
|
|
120
|
+
"finished_requests_ids": self.finished_requests_ids,
|
|
121
|
+
}
|
|
122
|
+
_add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
|
|
123
|
+
return tensor_dict
|
|
124
|
+
|
|
125
|
+
@classmethod
|
|
126
|
+
def from_broadcasted_tensor_dict(
|
|
127
|
+
cls: Type[TModelInputForGPU],
|
|
128
|
+
tensor_dict: Dict[str, Any],
|
|
129
|
+
attn_backend: Optional["AttentionBackend"] = None,
|
|
130
|
+
) -> TModelInputForGPU:
|
|
131
|
+
if attn_backend is not None:
|
|
132
|
+
tensor_dict = _init_attn_metadata_from_tensor_dict(
|
|
133
|
+
attn_backend, tensor_dict)
|
|
134
|
+
return cls(**tensor_dict)
|
|
135
|
+
|
|
136
|
+
# Exclude `async_callback` to be able to pickle this object
|
|
137
|
+
def __getstate__(self):
|
|
138
|
+
state = self.__dict__.copy()
|
|
139
|
+
del state["async_callback"]
|
|
140
|
+
return state
|
|
141
|
+
|
|
142
|
+
# TODO: What happens when we depickle this object?
|
|
143
|
+
# How can we update this callback to properly pass it to the engine?
|
|
144
|
+
def __setstate__(self, state):
|
|
145
|
+
self.__dict__.update(state)
|
|
146
|
+
self.__dict__.update({'async_callback': None})
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
@dataclass(frozen=True)
|
|
150
|
+
class ModelInputForGPUWithSamplingMetadata(ModelInputForGPU):
|
|
151
|
+
"""
|
|
152
|
+
Used by the ModelRunner.
|
|
153
|
+
"""
|
|
154
|
+
sampling_metadata: Optional["SamplingMetadata"] = None
|
|
155
|
+
# Used for speculative decoding. We do not broadcast it because it is only
|
|
156
|
+
# used by the driver worker.
|
|
157
|
+
is_prompt: Optional[bool] = None
|
|
158
|
+
|
|
159
|
+
def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
|
|
160
|
+
tensor_dict = {
|
|
161
|
+
"input_tokens": self.input_tokens,
|
|
162
|
+
"inputs_embeds": self.inputs_embeds,
|
|
163
|
+
"input_positions": self.input_positions,
|
|
164
|
+
"lora_requests": self.lora_requests,
|
|
165
|
+
"lora_mapping": self.lora_mapping,
|
|
166
|
+
"multi_modal_kwargs": self.multi_modal_kwargs,
|
|
167
|
+
"prompt_adapter_mapping": self.prompt_adapter_mapping,
|
|
168
|
+
"prompt_adapter_requests": self.prompt_adapter_requests,
|
|
169
|
+
"virtual_engine": self.virtual_engine,
|
|
170
|
+
"request_ids_to_seq_ids": self.request_ids_to_seq_ids,
|
|
171
|
+
"finished_requests_ids": self.finished_requests_ids,
|
|
172
|
+
}
|
|
173
|
+
_add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
|
|
174
|
+
_add_sampling_metadata_broadcastable_dict(tensor_dict,
|
|
175
|
+
self.sampling_metadata)
|
|
176
|
+
return tensor_dict
|
|
177
|
+
|
|
178
|
+
@classmethod
|
|
179
|
+
def from_broadcasted_tensor_dict(
|
|
180
|
+
cls,
|
|
181
|
+
tensor_dict: Dict[str, Any],
|
|
182
|
+
attn_backend: Optional["AttentionBackend"] = None,
|
|
183
|
+
) -> "ModelInputForGPUWithSamplingMetadata":
|
|
184
|
+
tensor_dict = _init_sampling_metadata_from_tensor_dict(tensor_dict)
|
|
185
|
+
if attn_backend is not None:
|
|
186
|
+
tensor_dict = _init_attn_metadata_from_tensor_dict(
|
|
187
|
+
attn_backend, tensor_dict)
|
|
188
|
+
return cls(**tensor_dict)
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
class ModelInputForGPUBuilder(ModelRunnerInputBuilderBase[ModelInputForGPU]):
|
|
192
|
+
"""Build ModelInputForGPU from SequenceGroupMetadata."""
|
|
193
|
+
|
|
194
|
+
# Note: ideally we would be using a dataclass(kw_only=True)
|
|
195
|
+
# here, so that this can be subclassed easily,
|
|
196
|
+
# but kw_only is not supported in python<3.10.
|
|
197
|
+
class InterDataForSeqGroup:
|
|
198
|
+
"""Intermediate data for the current sequence group."""
|
|
199
|
+
|
|
200
|
+
def simple_reinit(self):
|
|
201
|
+
self.input_tokens[0].clear() # type: ignore
|
|
202
|
+
self.inputs_embeds = None # type: ignore
|
|
203
|
+
self.input_positions[0].clear() # type: ignore
|
|
204
|
+
self.token_types[0].clear() # type: ignore
|
|
205
|
+
self.mrope_input_positions = None # type: ignore
|
|
206
|
+
self.seq_lens[0] = 0 # type: ignore
|
|
207
|
+
self.orig_seq_lens[0] = 0 # type: ignore
|
|
208
|
+
self.prompt_lens[0] = 0 # type: ignore
|
|
209
|
+
self.query_lens[0] = 0 # type: ignore
|
|
210
|
+
self.context_lens[0] = 0 # type: ignore
|
|
211
|
+
self.curr_sliding_window_blocks[0] = 0 # type: ignore
|
|
212
|
+
self.lora_index_mapping.clear() # type: ignore
|
|
213
|
+
self.lora_prompt_mapping.clear() # type: ignore
|
|
214
|
+
self.lora_requests.clear() # type: ignore
|
|
215
|
+
self.prompt_adapter_index_mapping.clear() # type: ignore
|
|
216
|
+
self.prompt_adapter_prompt_mapping.clear() # type: ignore
|
|
217
|
+
|
|
218
|
+
def __init__(
|
|
219
|
+
self,
|
|
220
|
+
*,
|
|
221
|
+
# From sequence group metadata.
|
|
222
|
+
request_id: str,
|
|
223
|
+
seq_ids: List[int],
|
|
224
|
+
is_prompt: bool,
|
|
225
|
+
block_tables: Optional[Dict[int, List[int]]],
|
|
226
|
+
computed_block_nums: List[int],
|
|
227
|
+
n_seqs: int = 0,
|
|
228
|
+
|
|
229
|
+
# Input tokens and positions.
|
|
230
|
+
input_tokens: Optional[List[List[int]]] = None,
|
|
231
|
+
inputs_embeds: Optional[torch.Tensor] = None,
|
|
232
|
+
input_positions: Optional[List[List[int]]] = None,
|
|
233
|
+
token_types: Optional[List[List[int]]] = None,
|
|
234
|
+
mrope_input_positions: Optional[List[List[List[int]]]] = None,
|
|
235
|
+
|
|
236
|
+
# The sequence length (may be capped to the sliding window).
|
|
237
|
+
seq_lens: Optional[List[int]] = None,
|
|
238
|
+
# The original sequence length (before applying sliding window).
|
|
239
|
+
# This is used to compute slot mapping.
|
|
240
|
+
orig_seq_lens: Optional[List[int]] = None,
|
|
241
|
+
# This is used in the dual-chunk flash attention backend.
|
|
242
|
+
prompt_lens: Optional[List[int]] = None,
|
|
243
|
+
# The query length.
|
|
244
|
+
query_lens: Optional[List[int]] = None,
|
|
245
|
+
# The number of tokens that are already computed.
|
|
246
|
+
context_lens: Optional[List[int]] = None,
|
|
247
|
+
# The current sliding window block.
|
|
248
|
+
curr_sliding_window_blocks: Optional[List[int]] = None,
|
|
249
|
+
|
|
250
|
+
# LoRA inputs.
|
|
251
|
+
lora_index_mapping: Optional[List[List[int]]] = None,
|
|
252
|
+
lora_prompt_mapping: Optional[List[List[int]]] = None,
|
|
253
|
+
lora_requests: Optional[Set[LoRARequest]] = None,
|
|
254
|
+
|
|
255
|
+
# Prompt adapter inputs.
|
|
256
|
+
prompt_adapter_index_mapping: Optional[List[int]] = None,
|
|
257
|
+
prompt_adapter_prompt_mapping: Optional[List[int]] = None,
|
|
258
|
+
prompt_adapter_request: Optional[PromptAdapterRequest] = None,
|
|
259
|
+
|
|
260
|
+
# Multi-modal inputs.
|
|
261
|
+
multi_modal_kwargs: Optional[MultiModalKwargs] = None,
|
|
262
|
+
multi_modal_placeholder_maps: Optional[Dict[
|
|
263
|
+
str, MultiModalPlaceholderMap]] = None,
|
|
264
|
+
|
|
265
|
+
# Whether the prefix cache is hit (prefill only).
|
|
266
|
+
prefix_cache_hit: bool = False,
|
|
267
|
+
reinit: bool = False,
|
|
268
|
+
reinit_use_defaults: bool = False,
|
|
269
|
+
encoder_seq_len: int = 0,
|
|
270
|
+
):
|
|
271
|
+
if reinit:
|
|
272
|
+
assert len(self.seq_ids) == len(seq_ids) # type: ignore
|
|
273
|
+
for i, seq_id in enumerate(seq_ids):
|
|
274
|
+
self.seq_ids[i] = seq_id # type: ignore
|
|
275
|
+
else:
|
|
276
|
+
self.seq_ids = seq_ids
|
|
277
|
+
|
|
278
|
+
self.request_id = request_id
|
|
279
|
+
self.is_prompt = is_prompt
|
|
280
|
+
self.block_tables = block_tables
|
|
281
|
+
self.computed_block_nums = computed_block_nums
|
|
282
|
+
self.n_seqs = n_seqs
|
|
283
|
+
self.encoder_seq_len = encoder_seq_len
|
|
284
|
+
|
|
285
|
+
if reinit:
|
|
286
|
+
if len(self.seq_ids) == 1 and reinit_use_defaults:
|
|
287
|
+
self.simple_reinit()
|
|
288
|
+
else:
|
|
289
|
+
if input_tokens:
|
|
290
|
+
self.input_tokens = input_tokens
|
|
291
|
+
else:
|
|
292
|
+
for seq_id in range(len(self.seq_ids)):
|
|
293
|
+
self.input_tokens[seq_id].clear()
|
|
294
|
+
|
|
295
|
+
self.inputs_embeds = inputs_embeds
|
|
296
|
+
|
|
297
|
+
if input_positions:
|
|
298
|
+
self.input_positions = input_positions
|
|
299
|
+
else:
|
|
300
|
+
for seq_id in range(len(self.seq_ids)):
|
|
301
|
+
self.input_positions[seq_id].clear()
|
|
302
|
+
|
|
303
|
+
if token_types:
|
|
304
|
+
self.token_types = token_types
|
|
305
|
+
else:
|
|
306
|
+
for seq_id in range(len(self.seq_ids)):
|
|
307
|
+
self.token_types[seq_id].clear()
|
|
308
|
+
|
|
309
|
+
self.mrope_input_positions = None
|
|
310
|
+
|
|
311
|
+
if seq_lens:
|
|
312
|
+
self.seq_lens = seq_lens
|
|
313
|
+
else:
|
|
314
|
+
for seq_id in range(len(self.seq_ids)):
|
|
315
|
+
self.seq_lens[seq_id] = 0
|
|
316
|
+
|
|
317
|
+
if orig_seq_lens:
|
|
318
|
+
self.orig_seq_lens = orig_seq_lens
|
|
319
|
+
else:
|
|
320
|
+
for seq_id in range(len(self.seq_ids)):
|
|
321
|
+
self.orig_seq_lens[seq_id] = 0
|
|
322
|
+
|
|
323
|
+
if prompt_lens:
|
|
324
|
+
self.prompt_lens = prompt_lens
|
|
325
|
+
else:
|
|
326
|
+
for seq_id in range(len(self.seq_ids)):
|
|
327
|
+
self.prompt_lens[seq_id] = 0
|
|
328
|
+
|
|
329
|
+
if query_lens:
|
|
330
|
+
self.query_lens = query_lens
|
|
331
|
+
else:
|
|
332
|
+
for seq_id in range(len(self.seq_ids)):
|
|
333
|
+
self.query_lens[seq_id] = 0
|
|
334
|
+
|
|
335
|
+
if context_lens:
|
|
336
|
+
self.context_lens = context_lens
|
|
337
|
+
else:
|
|
338
|
+
for seq_id in range(len(self.seq_ids)):
|
|
339
|
+
self.context_lens[seq_id] = 0
|
|
340
|
+
|
|
341
|
+
if curr_sliding_window_blocks:
|
|
342
|
+
self.curr_sliding_window_blocks = \
|
|
343
|
+
curr_sliding_window_blocks
|
|
344
|
+
else:
|
|
345
|
+
for seq_id in range(len(self.seq_ids)):
|
|
346
|
+
self.curr_sliding_window_blocks[seq_id] = 0
|
|
347
|
+
|
|
348
|
+
if lora_index_mapping:
|
|
349
|
+
self.lora_index_mapping = lora_index_mapping
|
|
350
|
+
else:
|
|
351
|
+
self.lora_index_mapping.clear()
|
|
352
|
+
|
|
353
|
+
if lora_prompt_mapping:
|
|
354
|
+
self.lora_prompt_mapping = lora_prompt_mapping
|
|
355
|
+
else:
|
|
356
|
+
self.lora_prompt_mapping.clear()
|
|
357
|
+
|
|
358
|
+
if lora_requests:
|
|
359
|
+
self.lora_requests = lora_requests
|
|
360
|
+
else:
|
|
361
|
+
self.lora_requests.clear()
|
|
362
|
+
|
|
363
|
+
if prompt_adapter_index_mapping:
|
|
364
|
+
self.prompt_adapter_index_mapping = \
|
|
365
|
+
prompt_adapter_index_mapping
|
|
366
|
+
else:
|
|
367
|
+
self.prompt_adapter_index_mapping.clear()
|
|
368
|
+
|
|
369
|
+
if prompt_adapter_prompt_mapping:
|
|
370
|
+
self.prompt_adapter_prompt_mapping = \
|
|
371
|
+
prompt_adapter_prompt_mapping
|
|
372
|
+
else:
|
|
373
|
+
self.prompt_adapter_prompt_mapping.clear()
|
|
374
|
+
|
|
375
|
+
else:
|
|
376
|
+
self.input_tokens = input_tokens or []
|
|
377
|
+
self.inputs_embeds = inputs_embeds
|
|
378
|
+
self.input_positions = input_positions or []
|
|
379
|
+
self.token_types = token_types or []
|
|
380
|
+
self.mrope_input_positions = mrope_input_positions or None
|
|
381
|
+
self.seq_lens = seq_lens or []
|
|
382
|
+
self.orig_seq_lens = orig_seq_lens or []
|
|
383
|
+
self.prompt_lens = prompt_lens or []
|
|
384
|
+
self.query_lens = query_lens or []
|
|
385
|
+
self.context_lens = context_lens or []
|
|
386
|
+
self.curr_sliding_window_blocks = \
|
|
387
|
+
curr_sliding_window_blocks or []
|
|
388
|
+
|
|
389
|
+
self.lora_index_mapping = lora_index_mapping or []
|
|
390
|
+
self.lora_prompt_mapping = lora_prompt_mapping or []
|
|
391
|
+
self.lora_requests = lora_requests or set()
|
|
392
|
+
|
|
393
|
+
self.prompt_adapter_index_mapping = (
|
|
394
|
+
prompt_adapter_index_mapping or [])
|
|
395
|
+
self.prompt_adapter_prompt_mapping = (
|
|
396
|
+
prompt_adapter_prompt_mapping or [])
|
|
397
|
+
|
|
398
|
+
self.prompt_adapter_request = prompt_adapter_request
|
|
399
|
+
self.multi_modal_kwargs = multi_modal_kwargs
|
|
400
|
+
self.multi_modal_placeholder_maps = multi_modal_placeholder_maps
|
|
401
|
+
self.prefix_cache_hit = prefix_cache_hit
|
|
402
|
+
|
|
403
|
+
self.n_seqs = len(self.seq_ids)
|
|
404
|
+
|
|
405
|
+
if not reinit:
|
|
406
|
+
self.__post_init__()
|
|
407
|
+
|
|
408
|
+
def __post_init__(self):
|
|
409
|
+
self.n_seqs = len(self.seq_ids)
|
|
410
|
+
|
|
411
|
+
self.input_tokens = [[] for _ in range(self.n_seqs)]
|
|
412
|
+
self.input_positions = [[] for _ in range(self.n_seqs)]
|
|
413
|
+
self.token_types = [[] for _ in range(self.n_seqs)]
|
|
414
|
+
self.mrope_input_positions = None
|
|
415
|
+
self.seq_lens = [0] * self.n_seqs
|
|
416
|
+
self.orig_seq_lens = [0] * self.n_seqs
|
|
417
|
+
self.prompt_lens = [0] * self.n_seqs
|
|
418
|
+
self.query_lens = [0] * self.n_seqs
|
|
419
|
+
self.context_lens = [0] * self.n_seqs
|
|
420
|
+
self.curr_sliding_window_blocks = [0] * self.n_seqs
|
|
421
|
+
|
|
422
|
+
self.lora_index_mapping = []
|
|
423
|
+
self.lora_prompt_mapping = []
|
|
424
|
+
|
|
425
|
+
        def __repr__(self) -> str:
            return (f"InterDataForSeqGroup("
                    f"request_id={self.request_id}, "
                    f"seq_ids={self.seq_ids}, "
                    f"is_prompt={self.is_prompt}, "
                    f"block_tables={self.block_tables}, "
                    f"computed_block_nums={self.computed_block_nums}, "
                    f"n_seqs={self.n_seqs}, "
                    f"input_tokens={self.input_tokens}, "
                    f"inputs_embeds.shape="
                    f"{getattr(self.inputs_embeds, 'shape', None)}, "
                    f"input_positions={self.input_positions}, "
                    f"token_types={self.token_types}, "
                    f"mrope_input_positions={self.mrope_input_positions}, "
                    f"seq_lens={self.seq_lens}, "
                    f"orig_seq_lens={self.orig_seq_lens}, "
                    f"query_lens={self.query_lens}, "
                    f"context_lens={self.context_lens}, "
                    f"multi_modal_kwargs={self.multi_modal_kwargs}")

    def gen_inter_data_builder(self, num_seqs: int):
        return lambda: ModelInputForGPUBuilder.InterDataForSeqGroup(
            request_id="",
            seq_ids=[0] * num_seqs,
            is_prompt=True,
            block_tables=None,
            computed_block_nums=[])

    def init_cached_inter_data(self, *args, **kwargs):
        assert len(args) == 0
        assert "seq_ids" in kwargs
        seq_ids = kwargs["seq_ids"]
        num_seqs = len(seq_ids)

        # The inter-data cache is per model_runner
        inter_data_cache = self.runner.inter_data_cache
        if num_seqs not in inter_data_cache:
            inter_data_cache[num_seqs] = PyObjectCache(
                self.gen_inter_data_builder(num_seqs))

        obj = inter_data_cache[num_seqs].get_object()
        obj.__init__(*args, **kwargs)
        return obj

    def reset_cached_inter_data(self):
        for cache in self.runner.inter_data_cache.values():
            cache.reset()

    def __init__(self,
                 runner: "GPUModelRunnerBase",
                 finished_requests_ids: Optional[List[str]] = None):
        super().__init__()
        # Compute functions for each sequence in a sequence group.
        # WARNING: The order of the functions matters!
        self.per_seq_compute_fns = [
            self._compute_lens,
            self._compute_for_prefix_cache_hit,
            self._compute_for_sliding_window,
            self._compute_lora_input,
        ]
        # Compute functions for each sequence group.
        # WARNING: The order of the functions matters!
        self.per_seq_group_compute_fns = [
            self._compute_prompt_adapter_input,
            self._compute_multi_modal_input,
        ]

        self.runner = runner
        self.model_input_cls = self.runner._model_input_cls
        self.attn_backend = self.runner.attn_backend
        self.scheduler_config = self.runner.scheduler_config
        self.sliding_window = self.runner.sliding_window
        self.block_size = self.runner.block_size
        self.enable_lora = self.runner.lora_config is not None
        self.enable_prompt_adapter = (self.runner.prompt_adapter_config
                                      is not None)

        # Attention metadata inputs.
        if self.attn_backend is not None:
            # spec decode (e.g. Medusa) does not have atten backend
            self.attn_metadata_builder = self.attn_backend.get_builder_cls()(
                weakref.proxy(self))

        # Engine/Model configurations.
        self.chunked_prefill_enabled = (
            self.scheduler_config is not None
            and self.scheduler_config.chunked_prefill_enabled)
        if self.sliding_window is not None:
            self.sliding_window_blocks = (
                self.sliding_window + self.block_size - 1) // self.block_size
            self.block_aligned_sliding_window = \
                self.sliding_window_blocks * self.block_size

    def prepare(self,
                finished_requests_ids: Optional[List[str]] = None) -> None:
        self.finished_requests_ids = finished_requests_ids

        # if the current batch is decode-only.
        # will be set to False if there is any non-decode request.
        self.decode_only = True

        # Intermediate data (data in CPU before going to GPU) for
        # the current sequence group.
        self.inter_data_list: List[
            ModelInputForGPUBuilder.InterDataForSeqGroup] = []

        self.attn_metadata_builder.prepare()

    def _compute_lens(self, inter_data: InterDataForSeqGroup, seq_idx: int,
                      seq_group_metadata: SequenceGroupMetadata):
        """Compute context length, sequence length and tokens
        for the given sequence data.
        """
        seq_data = seq_group_metadata.seq_data[inter_data.seq_ids[seq_idx]]
        token_chunk_size = seq_group_metadata.token_chunk_size

        # Compute context length (the number of tokens that are
        # already computed) and sequence length (total number of tokens).

        seq_len = seq_data.get_len()
        if inter_data.is_prompt:
            context_len = seq_data.get_num_computed_tokens()
            seq_len = min(seq_len, context_len + token_chunk_size)
        elif self.runner.scheduler_config.is_multi_step or \
                self.runner.model_config.is_encoder_decoder:
            context_len = seq_len - 1
        else:
            context_len = seq_data.get_num_computed_tokens()

        # Compute tokens.
        if seq_data.prompt_embeds is None:
            tokens = seq_data.get_token_ids()[context_len:seq_len]
            prompt_embeds = None
        else:
            tokens = [0] * (seq_len - context_len)
            prompt_embeds = seq_data.get_token_embeddings(
            )[context_len:seq_len]

        token_types = seq_group_metadata.token_type_ids

        inter_data.seq_lens[seq_idx] = seq_len
        inter_data.orig_seq_lens[seq_idx] = seq_len
        inter_data.prompt_lens[seq_idx] = seq_data.get_prompt_len()
        inter_data.context_lens[seq_idx] = context_len
        inter_data.input_tokens[seq_idx].extend(tokens)
        inter_data.inputs_embeds = prompt_embeds
        inter_data.input_positions[seq_idx].extend(range(context_len, seq_len))
        inter_data.token_types[seq_idx].extend(
            token_types if token_types else [])
        inter_data.query_lens[seq_idx] = seq_len - context_len

        if seq_data.mrope_position_delta is not None:
            if inter_data.mrope_input_positions is None:
                inter_data.mrope_input_positions = [None] * inter_data.n_seqs

            inter_data.mrope_input_positions[
                seq_idx] = MRotaryEmbedding.get_next_input_positions(
                    seq_data.mrope_position_delta,
                    context_len,
                    seq_len,
                )

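    # NOTE (editor's annotation, not part of the original file): a worked
    # example of the length bookkeeping above, assuming chunked prefill with
    # token_chunk_size=512 and a 1300-token prompt of which 512 tokens are
    # already computed:
    #   context_len = 512                         (num computed tokens)
    #   seq_len     = min(1300, 512 + 512) = 1024
    #   query_len   = seq_len - context_len = 512
    # so only positions [512, 1024) are fed to the model in this step.
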
    def _compute_for_prefix_cache_hit(
            self, inter_data: InterDataForSeqGroup, seq_idx: int,
            seq_group_metadata: SequenceGroupMetadata):
        """Check if hit prefix cache (i.e., some blocks are already computed).
        If hit, update input tokens and positions to only compute the
        remaining blocks.
        """
        computed_block_nums = inter_data.computed_block_nums

        # Note that prefix caching does not support sliding window.
        prefix_cache_hit = (computed_block_nums is not None
                            and len(computed_block_nums) > 0
                            and self.sliding_window is None
                            and inter_data.is_prompt)
        inter_data.prefix_cache_hit = prefix_cache_hit

        if not prefix_cache_hit:
            return

        assert computed_block_nums is not None
        # The cache hit prompt tokens in this sequence. Note that
        # this may be larger than the sequence length if chunked
        # prefill is enabled.
        prefix_cache_len = len(computed_block_nums) * self.block_size
        seq_group_metadata.seq_data[inter_data.seq_ids[
            seq_idx]].update_num_cached_tokens(prefix_cache_len)

        # The number of so far computed prompt tokens in this sequence.
        context_len = inter_data.context_lens[seq_idx]
        # The total number of prompt tokens in this sequence.
        # When chunked prefill is enabled, this is the token number of
        # computed chunks + current chunk.
        seq_len = inter_data.seq_lens[seq_idx]
        if prefix_cache_len <= context_len:
            # We already passed the cache hit region,
            # so do normal computation.
            pass
        elif context_len < prefix_cache_len < seq_len:
            # Partial hit. Compute the missing part.
            uncomputed_start = prefix_cache_len - context_len
            inter_data.input_tokens[seq_idx] = inter_data.input_tokens[
                seq_idx][uncomputed_start:]
            inter_data.input_positions[seq_idx] = inter_data.input_positions[
                seq_idx][uncomputed_start:]
            inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][
                uncomputed_start:]
            context_len = prefix_cache_len

            inter_data.context_lens[seq_idx] = context_len
            inter_data.query_lens[
                seq_idx] = inter_data.seq_lens[seq_idx] - context_len
        elif seq_len <= prefix_cache_len:
            # Full hit. Only compute the last token to avoid
            # erroneous behavior. FIXME: Ideally we should directly
            # mark all tokens as computed in the scheduler and do not
            # schedule this sequence, so this case should not happen.
            inter_data.input_tokens[seq_idx] = inter_data.input_tokens[
                seq_idx][-1:]
            inter_data.input_positions[seq_idx] = inter_data.input_positions[
                seq_idx][-1:]
            inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][
                -1:]
            inter_data.query_lens[seq_idx] = 1
            inter_data.context_lens[seq_idx] = inter_data.seq_lens[seq_idx] - 1

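    # NOTE (editor's annotation, not part of the original file): a worked
    # example of the partial-hit branch above, assuming block_size=16,
    # 6 computed blocks, context_len=32 and seq_len=128:
    #   prefix_cache_len = 6 * 16 = 96
    #   32 < 96 < 128, so uncomputed_start = 96 - 32 = 64 and the first 64
    #   entries of input_tokens/input_positions are dropped;
    #   context_len becomes 96 and query_len becomes 128 - 96 = 32.
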
    def _compute_for_sliding_window(self, inter_data: InterDataForSeqGroup,
                                    seq_idx: int,
                                    seq_group_metadata: SequenceGroupMetadata):
        """Update seq_len and curr_sliding_window_block for the given
        sequence data (only required by decoding) if sliding window is enabled.
        """
        curr_sliding_window_block = 0
        sliding_seq_len = inter_data.seq_lens[seq_idx]
        if not inter_data.is_prompt and self.sliding_window is not None:
            # TODO(sang): This is a hack to make sliding window work with
            # paged attn. We can remove it if we make paged attn kernel
            # to properly handle slinding window attn.
            curr_sliding_window_block = self.sliding_window_blocks
            # number of elements in last block
            suff_len = inter_data.seq_lens[seq_idx] % self.block_size
            sliding_seq_len = min(inter_data.seq_lens[seq_idx],
                                  self.block_aligned_sliding_window + suff_len)
            if suff_len > 0:
                curr_sliding_window_block += 1

        inter_data.curr_sliding_window_blocks[
            seq_idx] = curr_sliding_window_block
        inter_data.seq_lens[seq_idx] = sliding_seq_len

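    # NOTE (editor's annotation, not part of the original file): a worked
    # example of the decode-time sliding-window clamp above, assuming
    # sliding_window=100, block_size=16 and seq_len=1000:
    #   sliding_window_blocks        = (100 + 16 - 1) // 16 = 7
    #   block_aligned_sliding_window = 7 * 16 = 112
    #   suff_len = 1000 % 16 = 8  ->  curr_sliding_window_block = 7 + 1 = 8
    #   sliding_seq_len = min(1000, 112 + 8) = 120
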
    def _compute_lora_input(self, inter_data: InterDataForSeqGroup,
                            seq_idx: int,
                            seq_group_metadata: SequenceGroupMetadata):
        """If LoRA is enabled, compute LoRA index and prompt mapping."""
        if not self.enable_lora:
            return

        lora_id = seq_group_metadata.lora_int_id
        if lora_id > 0:
            inter_data.lora_requests.add(seq_group_metadata.lora_request)
        query_len = inter_data.query_lens[seq_idx]
        inter_data.lora_index_mapping.append([lora_id] * query_len)
        sampling_params = seq_group_metadata.sampling_params
        if sampling_params and sampling_params.prompt_logprobs is not None:
            inter_data.lora_prompt_mapping.append([lora_id] * query_len)
        elif not self.chunked_prefill_enabled or seq_group_metadata.do_sample:
            inter_data.lora_prompt_mapping.append([lora_id])
        else:
            inter_data.lora_prompt_mapping.append([])

    def _compute_prompt_adapter_input(
            self, inter_data: InterDataForSeqGroup,
            seq_group_metadata: SequenceGroupMetadata):
        """If prompt adapter is enabled, compute index and prompt mapping.
        """
        # Note that when is_prompt=True, we expect only one sequence
        # in the group.
        if not self.enable_prompt_adapter:
            return

        prompt_adapter_id = seq_group_metadata.prompt_adapter_id
        if prompt_adapter_id <= 0 or not inter_data.is_prompt:
            return

        # We expect only one sequence in the group when is_prompt=True.
        assert inter_data.n_seqs == 1
        query_len = inter_data.query_lens[0]
        inter_data.prompt_adapter_request = (
            seq_group_metadata.prompt_adapter_request)

        num_tokens = seq_group_metadata.prompt_adapter_num_virtual_tokens
        inter_data.prompt_adapter_index_mapping = [
            prompt_adapter_id
        ] * num_tokens + [0] * (query_len - num_tokens)
        inter_data.prompt_adapter_prompt_mapping = [prompt_adapter_id] * (
            query_len if seq_group_metadata.sampling_params
            and seq_group_metadata.sampling_params.prompt_logprobs else 1)

    def _compute_multi_modal_input(self, inter_data: InterDataForSeqGroup,
                                   seq_group_metadata: SequenceGroupMetadata):
        """If multi-modal data is given, add it to the input."""
        # NOTE: mm_kwargs only includes the subset of multi-modal items that
        # intersect with the current prefill positions.
        positions = inter_data.input_positions[0]
        mm_kwargs, placeholder_maps = MultiModalPlaceholderMap.from_seq_group(
            seq_group_metadata,
            range(positions[0], positions[0] + len(positions)))

        # M-RoPE requires mrope_positions even for plain text; return early
        # when mm_kwargs is empty only if inter_data.is_prompt is False.
        if not mm_kwargs and not inter_data.is_prompt:
            return

        inter_data.multi_modal_kwargs = mm_kwargs
        inter_data.multi_modal_placeholder_maps = placeholder_maps

        # special processing for mrope position deltas.
        if self.runner.model_config.uses_mrope:
            image_grid_thw = mm_kwargs.get("image_grid_thw", None)
            video_grid_thw = mm_kwargs.get("video_grid_thw", None)
            audio_feature_lengths = mm_kwargs.get("audio_feature_lengths",
                                                  None)

            second_per_grid_ts = mm_kwargs.get("second_per_grid_ts", None)
            use_audio_in_video = mm_kwargs.get("use_audio_in_video", False)
            hf_config = self.runner.model_config.hf_config

            inter_data.mrope_input_positions = [None] * inter_data.n_seqs
            for seq_idx in range(inter_data.n_seqs):
                seq_data = seq_group_metadata.seq_data[
                    inter_data.seq_ids[seq_idx]]
                token_ids = seq_data.get_token_ids()

                mrope_input_positions, mrope_position_delta = \
                    MRotaryEmbedding.get_input_positions(
                        token_ids,
                        hf_config=hf_config,
                        image_grid_thw=image_grid_thw,
                        video_grid_thw=video_grid_thw,
                        second_per_grid_ts=second_per_grid_ts,
                        context_len=inter_data.context_lens[seq_idx],
                        seq_len=inter_data.seq_lens[seq_idx],
                        audio_feature_lengths=audio_feature_lengths,
                        use_audio_in_video=use_audio_in_video,
                    )

                seq_data.mrope_position_delta = mrope_position_delta
                inter_data.mrope_input_positions[
                    seq_idx] = mrope_input_positions

    def add_seq_group(self, seq_group_metadata: SequenceGroupMetadata):
        """Add a sequence group to the builder."""
        seq_ids = seq_group_metadata.seq_data.keys()
        n_seqs = len(seq_ids)
        is_prompt = seq_group_metadata.is_prompt

        if is_prompt:
            assert n_seqs == 1
            self.decode_only = False

        encoder_seq_len = 0

        if self.runner.model_config.is_encoder_decoder:
            encoder_seq_len = seq_group_metadata.encoder_seq_data.get_len()

        inter_data = self.init_cached_inter_data(
            request_id=seq_group_metadata.request_id,
            seq_ids=seq_ids,
            is_prompt=is_prompt,
            block_tables=seq_group_metadata.block_tables,
            computed_block_nums=seq_group_metadata.computed_block_nums,
            reinit=True,
            reinit_use_defaults=True,
            encoder_seq_len=encoder_seq_len)

        self.inter_data_list.append(inter_data)

        for seq_idx in range(n_seqs):
            for per_seq_fn in self.per_seq_compute_fns:
                per_seq_fn(inter_data, seq_idx, seq_group_metadata)
        for per_seq_group_fn in self.per_seq_group_compute_fns:
            per_seq_group_fn(inter_data, seq_group_metadata)

    def _use_captured_graph(self,
                            batch_size: int,
                            decode_only: bool,
                            max_decode_seq_len: int,
                            max_encoder_seq_len: int = 0) -> bool:
        return (decode_only and not self.runner.model_config.enforce_eager
                and max_decode_seq_len <= self.runner.max_seq_len_to_capture
                and max_encoder_seq_len <= self.runner.max_seq_len_to_capture
                and batch_size <= self.runner.max_batchsize_to_capture)

    def _get_cuda_graph_pad_size(self,
                                 num_seqs: int,
                                 max_decode_seq_len: int,
                                 max_encoder_seq_len: int = 0) -> int:
        """
        Determine the number of padding sequences required for running in
        CUDA graph mode. Returns -1 if CUDA graphs cannot be used.

        In the multi-step + chunked-prefill case, only the first step
        has Prefills (if any). The rest of the steps are guaranteed to be all
        decodes. In this case, we set up the padding as if all the sequences
        are decodes so we may run all steps except the first step in CUDA graph
        mode. The padding is accounted for in the multi-step `advance_step`
        family of functions.

        Args:
            num_seqs (int): Number of sequences scheduled to run.
            max_decode_seq_len (int): Greatest of all the decode sequence
                lengths. Used only in checking the viablility of using
                CUDA graphs.
            max_encoder_seq_len (int, optional): Greatest of all the encode
                sequence lengths. Defaults to 0. Used only in checking the
                viability of using CUDA graphs.
        Returns:
            int: Returns the determined number of padding sequences. If
                CUDA graphs is not viable, returns -1.
        """
        is_mscp: bool = self.runner.scheduler_config.is_multi_step and \
            self.runner.scheduler_config.chunked_prefill_enabled
        decode_only = self.decode_only or is_mscp
        if not decode_only:
            # Early exit so we can treat num_seqs as the batch_size below.
            return -1

        # batch_size out of this function refers to the number of input
        # tokens being scheduled. This conflation of num_seqs as batch_size
        # is valid as this is a decode-only case.
        batch_size = num_seqs
        if not self._use_captured_graph(batch_size, decode_only,
                                        max_decode_seq_len,
                                        max_encoder_seq_len):
            return -1

        graph_batch_size = self.runner.vllm_config.pad_for_cudagraph(
            batch_size)
        assert graph_batch_size >= batch_size
        return graph_batch_size - batch_size

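    # NOTE (editor's annotation, not part of the original file): a worked
    # example of the padding above, assuming a decode-only batch of 13
    # sequences and captured graph sizes of 1, 2, 4, 8, 16, ...:
    # pad_for_cudagraph(13) would return 16, so this method returns 3 and
    # the batch is padded to run through the batch-size-16 CUDA graph.
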
    def build(self) -> ModelInputForGPU:
        """Finalize the builder intermediate data and
        create on-device tensors.
        """
        # Combine and flatten intermediate data.
        input_tokens = list[int]()
        inputs_embeds_list = list[torch.Tensor]()
        token_types = list[int]()
        for inter_data in self.inter_data_list:
            for cur_input_tokens in inter_data.input_tokens:
                input_tokens.extend(cur_input_tokens)
            for cur_token_types in inter_data.token_types:
                token_types.extend(cur_token_types)
            if inter_data.inputs_embeds is not None:
                inputs_embeds_list.append(
                    inter_data.inputs_embeds.to(
                        dtype=self.runner.model_config.dtype,
                        device=self.runner.device))
        inputs_embeds: Optional[torch.Tensor]
        if len(inputs_embeds_list) == 0:
            inputs_embeds = None
        else:
            inputs_embeds = torch.cat(inputs_embeds_list, dim=0).to(
                dtype=self.runner.model_config.dtype,
                device=self.runner.device)
            assert len(inputs_embeds) == len(input_tokens)

        if not input_tokens and inputs_embeds is None:
            # This may happen when all prefill requests hit
            # prefix caching and there is no decode request.
            return self.model_input_cls()

        mrope_input_positions: Optional[List[List[int]]] = None
        if any(inter_data.mrope_input_positions is not None
               for inter_data in self.inter_data_list):
            mrope_input_positions = [[] for _ in range(3)]
            for idx in range(3):
                for inter_data in self.inter_data_list:
                    msections = inter_data.mrope_input_positions
                    if msections is None:
                        for _seq_input_positions in inter_data.input_positions:
                            mrope_input_positions[idx].extend(
                                _seq_input_positions)
                    else:
                        for _seq_mrope_input_positions in msections:
                            mrope_input_positions[idx].extend(
                                _seq_mrope_input_positions[idx])
            input_positions = None
        else:
            input_positions = []
            for inter_data in self.inter_data_list:
                for cur_input_positions in inter_data.input_positions:
                    input_positions.extend(cur_input_positions)

        seq_lens = []
        query_lens = []
        max_decode_seq_len = 0
        max_encoder_seq_len = 0
        for inter_data in self.inter_data_list:
            seq_lens.extend(inter_data.seq_lens)
            query_lens.extend(inter_data.query_lens)
            if not inter_data.is_prompt:
                max_decode_seq_len = max(max_decode_seq_len,
                                         max(inter_data.seq_lens))
                if self.runner.model_config.is_encoder_decoder:
                    max_encoder_seq_len = max(max_encoder_seq_len,
                                              inter_data.encoder_seq_len)

        # Mapping from request IDs to sequence IDs. Used for Jamba models
        # that manages the cache by itself.
        request_ids_to_seq_ids = {
            data.request_id: data.seq_ids
            for data in self.inter_data_list
        }

        cuda_graph_pad_size = self._get_cuda_graph_pad_size(
            num_seqs=len(seq_lens),
            max_decode_seq_len=max_decode_seq_len,
            max_encoder_seq_len=max_encoder_seq_len)

        batch_size = len(input_tokens)
        if cuda_graph_pad_size != -1:
            # If cuda graph can be used, pad tensors accordingly.
            # See `capture_model` API for more details.
            # vLLM uses cuda graph only for decoding requests.
            batch_size += cuda_graph_pad_size

        # Tokens and positions.
        if cuda_graph_pad_size:
            input_tokens.extend(itertools.repeat(0, cuda_graph_pad_size))
        assert self.runner.device is not None
        input_tokens_tensor = async_tensor_h2d(input_tokens, torch.long,
                                               self.runner.device,
                                               self.runner.pin_memory)

        token_types_tensor = async_tensor_h2d(token_types, torch.long,
                                              self.runner.device,
                                              self.runner.pin_memory) \
                                              if token_types else None

        if mrope_input_positions is not None:
            for idx in range(3):
                mrope_input_positions[idx].extend(
                    itertools.repeat(0, cuda_graph_pad_size))
            input_positions_tensor = async_tensor_h2d(mrope_input_positions,
                                                      torch.long,
                                                      self.runner.device,
                                                      self.runner.pin_memory)
        else:
            input_positions.extend(itertools.repeat(0, cuda_graph_pad_size))
            input_positions_tensor = async_tensor_h2d(input_positions,
                                                      torch.long,
                                                      self.runner.device,
                                                      self.runner.pin_memory)
        # Sequence and query lengths.
        if cuda_graph_pad_size:
            seq_lens.extend(itertools.repeat(1, cuda_graph_pad_size))

        # Attention metadata.
        attn_metadata = self.attn_metadata_builder.build(
            seq_lens, query_lens, cuda_graph_pad_size, batch_size)

        # LoRA data.
        lora_requests = set()
        lora_mapping = None
        if self.enable_lora:
            lora_requests = set(r for data in self.inter_data_list
                                for r in data.lora_requests)
            lora_index_mapping = flatten_2d_lists([
                flatten_2d_lists(inter_data.lora_index_mapping)
                for inter_data in self.inter_data_list
            ])
            if cuda_graph_pad_size:
                lora_index_mapping.extend(
                    itertools.repeat(0, cuda_graph_pad_size))
            lora_prompt_mapping = flatten_2d_lists([
                flatten_2d_lists(inter_data.lora_prompt_mapping)
                for inter_data in self.inter_data_list
            ])

            lora_mapping = LoRAMapping(
                **dict(index_mapping=lora_index_mapping,
                       prompt_mapping=lora_prompt_mapping,
                       is_prefill=not self.decode_only))

        # Prompt adapter data.
        prompt_adapter_requests: Set[PromptAdapterRequest] = set()
        prompt_adapter_mapping = None
        if self.enable_prompt_adapter:
            prompt_adapter_requests = set(
                data.prompt_adapter_request for data in self.inter_data_list
                if data.prompt_adapter_request is not None)
            prompt_adapter_index_mapping = flatten_2d_lists([
                inter_data.prompt_adapter_index_mapping
                for inter_data in self.inter_data_list
            ])
            if cuda_graph_pad_size:
                prompt_adapter_index_mapping.extend(
                    itertools.repeat(0, cuda_graph_pad_size))
            prompt_adapter_prompt_mapping = flatten_2d_lists([
                inter_data.prompt_adapter_prompt_mapping
                for inter_data in self.inter_data_list
            ])
            prompt_adapter_mapping = PromptAdapterMapping(
                prompt_adapter_index_mapping,
                prompt_adapter_prompt_mapping,
            )

        # Multi-modal data.
        multi_modal_kwargs_list = [
            data.multi_modal_kwargs for data in self.inter_data_list
            if data.multi_modal_kwargs is not None
        ]
        multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list)

        return self.model_input_cls(
            input_tokens=input_tokens_tensor,
            inputs_embeds=inputs_embeds,
            input_positions=input_positions_tensor,
            token_types=token_types_tensor,
            attn_metadata=attn_metadata,
            seq_lens=seq_lens,
            query_lens=query_lens,
            lora_mapping=lora_mapping,
            lora_requests=lora_requests,
            multi_modal_kwargs=multi_modal_kwargs,
            request_ids_to_seq_ids=request_ids_to_seq_ids,
            finished_requests_ids=self.finished_requests_ids,
            prompt_adapter_mapping=prompt_adapter_mapping,
            prompt_adapter_requests=prompt_adapter_requests)


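# NOTE (editor's annotation, not part of the original file): the builder is
# driven by the model runner roughly as follows (see
# GPUModelRunnerBase._prepare_model_input_tensors below):
#
#   builder.prepare(finished_requests_ids)
#   for seq_group_metadata in seq_group_metadata_list:
#       builder.add_seq_group(seq_group_metadata)
#   builder.reset_cached_inter_data()
#   model_input = builder.build()
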
class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
    """
    Helper class for shared methods between GPU model runners.
    """
    _model_input_cls: Type[TModelInputForGPU]
    _builder_cls: Type[ModelInputForGPUBuilder]
    builder: ModelInputForGPUBuilder

    def __init__(
        self,
        vllm_config: VllmConfig,
        kv_cache_dtype: Optional[str] = "auto",
        is_driver_worker: bool = False,
        return_hidden_states: bool = False,
        input_registry: InputRegistry = INPUT_REGISTRY,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
    ):

        ModelRunnerBase.__init__(self, vllm_config)
        model_config = self.model_config
        cache_config = self.cache_config

        self.is_driver_worker = is_driver_worker
        self.return_hidden_states = return_hidden_states

        self.device = self.device_config.device
        self.pin_memory = is_pin_memory_available()

        self.kv_cache_dtype = kv_cache_dtype
        self.sliding_window = model_config.get_sliding_window()
        self.block_size = cache_config.block_size
        self.max_seq_len_to_capture = self.model_config.max_seq_len_to_capture
        self.max_batchsize_to_capture = \
            self.vllm_config.compilation_config.max_capture_size

        #
        self.graph_runners: List[Dict[Tuple[int, bool], CUDAGraphRunner]] = [
            {} for _ in range(self.parallel_config.pipeline_parallel_size)
        ]
        self.graph_memory_pool: Optional[Tuple[
            int, int]] = None  # Set during graph capture.

        self.has_inner_state = model_config.has_inner_state

        self.in_profile_run = False

        # When using CUDA graph, the input block tables must be padded to
        # max_seq_len_to_capture. However, creating the block table in
        # Python can be expensive. To optimize this, we cache the block table
        # in numpy and only copy the actual input content at every iteration.
        # The shape of the cached block table will be
        # (max batch size to capture, max seq len to capture / block size).
        self.graph_block_tables = np.zeros(
            (self.max_batchsize_to_capture, self.get_max_block_per_batch()),
            dtype=np.int32)

        # Attention-free but stateful models like Mamba need a placeholder attn
        # backend, as the attention metadata is needed to manage internal state.
        # However we must bypass attention selection altogether for some models
        # used for speculative decoding to avoid a divide-by-zero in
        # model_config.get_head_size()
        num_attn_heads = self.model_config.get_num_attention_heads(
            self.parallel_config)
        needs_attn_backend = (num_attn_heads != 0
                              or self.model_config.is_attention_free)

        self.attn_backend = get_attn_backend(
            self.model_config.get_head_size(),
            self.model_config.dtype,
            self.kv_cache_dtype,
            self.block_size,
            self.model_config.is_attention_free,
            use_mla=self.model_config.use_mla,
        ) if needs_attn_backend else None
        if self.attn_backend:
            self.attn_state = self.attn_backend.get_state_cls()(
                weakref.proxy(self))
        else:
            self.attn_state = CommonAttentionState(weakref.proxy(self))

        # Multi-modal data support
        self.input_registry = input_registry
        self.mm_registry = mm_registry

        # Lazy initialization
        self.model: nn.Module  # Set after load_model
        # Set after load_model.
        self.lora_manager: Optional[LRUCacheWorkerLoRAManager] = None
        self.prompt_adapter_manager: LRUCacheWorkerPromptAdapterManager = None
        self.sampler = get_sampler()

        set_cpu_offload_max_bytes(
            int(self.cache_config.cpu_offload_gb * 1024**3))

        # Used to cache python objects
        self.inter_data_cache: Dict[int, PyObjectCache] = {}

        # Using the PythonizationCache in Pipeline-Parallel clobbers the
        # SequenceGroupToSample object. In Pipeline-Parallel, we have
        # more than 1 Scheduler, resulting in a potential back-to-back
        # prepare_model_inputs() call. This clobbers the cached
        # SequenceGroupToSample objects, as we reset the cache during
        # every prepare_model_inputs() call.
        self.sampling_metadata_cache: SamplingMetadataCache = \
            SamplingMetadataCache() \
                if self.parallel_config.pipeline_parallel_size == 1 else None

        if hasattr(self, "_builder_cls"):
            # multi-step model runner does not have `_builder_cls`
            self.builder = self._builder_cls(weakref.proxy(self))

    def load_model(self) -> None:
        logger.info("Starting to load model %s...", self.model_config.model)
        with DeviceMemoryProfiler(self.device) as m:
            time_before_load = time.perf_counter()
            self.model = get_model(vllm_config=self.vllm_config)
            if self.lora_config:
                assert supports_lora(
                    self.model
                ), f"{self.model.__class__.__name__} does not support LoRA yet."

                if supports_multimodal(self.model):
                    logger.warning(
                        "Regarding multimodal models, vLLM currently "
                        "only supports adding LoRA to language model.")

                # Use get_text_config() in case of multimodal models
                text_config = self.model_config.hf_config.get_text_config()

                self.lora_manager = LRUCacheWorkerLoRAManager(
                    self.scheduler_config.max_num_seqs,
                    self.scheduler_config.max_num_batched_tokens,
                    self.vocab_size,
                    self.lora_config,
                    self.device,
                    self.model.embedding_modules,
                    self.model.embedding_padding_modules,
                    max_position_embeddings=text_config.
                    max_position_embeddings,
                )
                self.model = self.lora_manager.create_lora_manager(self.model)
            time_after_load = time.perf_counter()

        self.model_memory_usage = m.consumed_memory
        logger.info("Model loading took %.4f GiB and %.6f seconds",
                    self.model_memory_usage / GiB_bytes,
                    time_after_load - time_before_load)
        if self.prompt_adapter_config:
            self.prompt_adapter_manager = LRUCacheWorkerPromptAdapterManager(
                self.scheduler_config.max_num_seqs,
                self.scheduler_config.max_num_batched_tokens, self.device,
                self.prompt_adapter_config)
            self.model = (
                self.prompt_adapter_manager.create_prompt_adapter_manager(
                    self.model))

        if self.vllm_config.compilation_config.level ==\
            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
            backend = self.vllm_config.compilation_config.init_backend(
                self.vllm_config)
            self.model = torch.compile(
                self.model,
                fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE,
                backend=backend)

    def get_model(self) -> nn.Module:
        return self.model

    def save_sharded_state(
        self,
        path: str,
        pattern: Optional[str] = None,
        max_size: Optional[int] = None,
    ) -> None:
        from vllm.model_executor.model_loader import ShardedStateLoader
        ShardedStateLoader.save_model(
            self.model,
            path,
            pattern=pattern,
            max_size=max_size,
        )

    def save_tensorized_model(
        self,
        tensorizer_config: TensorizerConfig,
    ) -> None:
        from vllm.model_executor.model_loader import TensorizerLoader
        TensorizerLoader.save_model(
            self.model,
            tensorizer_config=tensorizer_config,
        )

    def get_max_block_per_batch(self) -> int:
        block_size = self.block_size
        return (self.max_seq_len_to_capture + block_size - 1) // block_size

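    # NOTE (editor's annotation, not part of the original file): e.g. with
    # max_seq_len_to_capture=8192 and block_size=16 the method above returns
    # (8192 + 15) // 16 = 512 block-table slots per sequence, which sizes the
    # cached numpy block table used for CUDA graph runs.
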
    def _prepare_model_input_tensors(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        finished_requests_ids: Optional[List[str]] = None
    ) -> TModelInputForGPU:
        """Helper method to prepare the model input based on a given sequence
        group. Prepares metadata needed for the base model forward pass but not
        metadata for possible additional steps, e.g., sampling.

        The API assumes seq_group_metadata_list is sorted by prefill -> decode.

        The result tensors and data structure also batches input in prefill
        -> decode order. For example,

        - input_tokens[:num_prefill_tokens] contains prefill tokens.
        - input_tokens[num_prefill_tokens:] contains decode tokens.

        If cuda graph is required, this API automatically pads inputs.
        """
        self.builder.prepare(finished_requests_ids)
        for seq_group_metadata in seq_group_metadata_list:
            try:
                self.builder.add_seq_group(seq_group_metadata)
            except Exception as e:
                # Raise an exception that tracks the ID of the bad request
                raise InputProcessingError(seq_group_metadata.request_id,
                                           str(e)) from e

        self.builder.reset_cached_inter_data()

        return self.builder.build()  # type: ignore

    @contextmanager
    def set_in_profile_run(self):
        self.in_profile_run = True
        try:
            yield
        finally:
            self.in_profile_run = False

    @torch.inference_mode()
    def profile_run(self) -> None:
        max_num_batched_tokens = \
            self.scheduler_config.max_num_batched_tokens
        max_num_seqs = self.scheduler_config.max_num_seqs
        self._dummy_run(max_num_batched_tokens, max_num_seqs)

    def _add_dummy_loras(self, num_loras: int) -> list[LoRARequest]:
        assert num_loras > 0
        assert self.lora_manager is not None

        dummy_lora_requests: list[LoRARequest] = []
        with self.lora_manager.dummy_lora_cache():
            for idx in range(num_loras):
                lora_id = idx + 1
                dummy_lora_request = LoRARequest(
                    lora_name=f"warmup_{lora_id}",
                    lora_int_id=lora_id,
                    lora_path="/not/a/real/path",
                )
                self.lora_manager.add_dummy_lora(dummy_lora_request,
                                                 rank=LORA_WARMUP_RANK)
                dummy_lora_requests.append(dummy_lora_request)
        return dummy_lora_requests

    def _remove_dummy_loras(self):
        # Remove dummy loras.
        assert self.lora_manager is not None
        self.remove_all_loras()

    def _dummy_run(self,
                   max_num_batched_tokens: int,
                   max_num_seqs: int = 1) -> None:
        with self.set_in_profile_run():
            # Enable top-k sampling to reflect the accurate memory usage.
            sampling_params = \
                SamplingParams(top_p=0.99, top_k=self.vocab_size - 1)

            # This represents the maximum number of different requests
            # that will have unique loras, and therefore the max amount of
            # memory consumption. Create dummy lora request copies from the
            # lora request passed in, which contains a lora from the lora
            # warmup path.
            dummy_lora_requests: List[LoRARequest] = []
            dummy_lora_requests_per_seq: List[LoRARequest] = []
            if self.lora_config:
                dummy_lora_requests = self._add_dummy_loras(
                    self.lora_config.max_loras)
                assert len(dummy_lora_requests) == self.lora_config.max_loras
                dummy_lora_requests_per_seq = [
                    dummy_lora_requests[idx % len(dummy_lora_requests)]
                    for idx in range(max_num_seqs)
                ]

            # Profile memory usage with max_num_sequences sequences and the
            # total number of tokens equal to max_num_batched_tokens.
            seqs: List[SequenceGroupMetadata] = []
            # Additional GPU memory may be needed for multi-modal encoding,
            # which needs to be accounted for when calculating the GPU blocks
            # for vLLM blocker manager.
            # To exercise the worst scenario for GPU memory consumption,
            # the number of seqs (batch_size) is chosen to maximize the number
            # of images processed.

            max_mm_tokens = self.mm_registry.get_max_multimodal_tokens(
                self.model_config)
            if max_mm_tokens > 0:
                max_num_seqs_orig = max_num_seqs
                max_num_seqs = min(max_num_seqs,
                                   max_num_batched_tokens // max_mm_tokens)
                if max_num_seqs < 1:
                    expr = (f"min({max_num_seqs_orig}, "
                            f"{max_num_batched_tokens} // {max_mm_tokens})")
                    logger.warning(
                        "Computed max_num_seqs (%s) to be less than 1. "
                        "Setting it to the minimum value of 1.", expr)
                    max_num_seqs = 1

            batch_size = 0
            for group_id in range(max_num_seqs):
                seq_len = (max_num_batched_tokens // max_num_seqs +
                           (group_id < max_num_batched_tokens % max_num_seqs))
                batch_size += seq_len

                dummy_data = self.input_registry \
                    .dummy_data_for_profiling(self.model_config,
                                              seq_len,
                                              self.mm_registry)

                seq = SequenceGroupMetadata(
                    request_id=str(group_id),
                    is_prompt=True,
                    seq_data={group_id: dummy_data.seq_data},
                    sampling_params=sampling_params,
                    block_tables=None,
                    lora_request=dummy_lora_requests_per_seq[group_id]
                    if dummy_lora_requests_per_seq else None,
                    multi_modal_data=dummy_data.multi_modal_data,
                    multi_modal_placeholders=dummy_data.
                    multi_modal_placeholders,
                )
                seqs.append(seq)

            # Run the model with the dummy inputs.
            num_layers = self.model_config.get_num_layers(self.parallel_config)
            # use an empty tensor instead of `None`` to force Dynamo to pass
            # it by reference, rather by specializing on the value ``None``.
            # the `dtype` argument does not matter, and we use `float32` as
            # a placeholder (it has wide hardware support).
            # it is important to create tensors inside the loop, rather than
            # multiplying the list, to avoid Dynamo from treating them as
            # tensor aliasing.
            kv_caches = [
                torch.tensor([], dtype=torch.float32, device=self.device)
                for _ in range(num_layers)
            ]
            finished_requests_ids = [seq.request_id for seq in seqs]
            model_input = self.prepare_model_input(
                seqs, finished_requests_ids=finished_requests_ids)
            intermediate_tensors = None
            if not get_pp_group().is_first_rank:
                intermediate_tensors = \
                    self.model.make_empty_intermediate_tensors(
                        batch_size=batch_size,
                        dtype=self.model_config.dtype,
                        device=self.device)

            # Disable KV Scale Calculation for dummy data during profile run
            if model_input.attn_metadata is not None:
                model_input.attn_metadata.enable_kv_scales_calculation = False

            self.execute_model(model_input, kv_caches, intermediate_tensors)
            torch.cuda.synchronize()
            if self.lora_config:
                self._remove_dummy_loras()

            return

    def remove_all_loras(self):
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        self.lora_manager.remove_all_adapters()

    def set_active_loras(self, lora_requests: Set[LoRARequest],
                         lora_mapping: LoRAMapping) -> None:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        self.lora_manager.set_active_adapters(lora_requests, lora_mapping)

    def add_lora(self, lora_request: LoRARequest) -> bool:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.add_adapter(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.remove_adapter(lora_id)

    def pin_lora(self, lora_id: int) -> bool:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.pin_adapter(lora_id)

    def list_loras(self) -> Set[int]:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.list_adapters()

    def remove_all_prompt_adapters(self):
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        self.prompt_adapter_manager.remove_all_adapters()

    def set_active_prompt_adapters(
            self, prompt_adapter_requests: Set[PromptAdapterRequest],
            prompt_adapter_mapping: PromptAdapterMapping) -> None:
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        self.prompt_adapter_manager.set_active_adapters(
            prompt_adapter_requests, prompt_adapter_mapping)

    def add_prompt_adapter(
            self, prompt_adapter_request: PromptAdapterRequest) -> bool:
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        return self.prompt_adapter_manager.add_adapter(prompt_adapter_request)

    def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        return self.prompt_adapter_manager.remove_adapter(prompt_adapter_id)

    def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        return self.prompt_adapter_manager.pin_adapter(prompt_adapter_id)

    def list_prompt_adapters(self) -> Set[int]:
        if not self.prompt_adapter_manager:
            raise RuntimeError("PromptAdapter is not enabled.")
        return self.prompt_adapter_manager.list_adapters()

    @torch.inference_mode()
    def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None:
        """Cuda graph capture a model.

        Note that CUDA graph's performance gain is negligible if number
        of batched tokens are larger than 200. And since CUDA graph
        requires fixed sized tensors, supporting large/variable batch
        size requires high GPU memory overhead. Thus, vLLM only captures
        decoding requests. Mixed batch (chunked prefill + decoding) or
        prefill requests are not captured.

        Since it is used for decoding-only, it assumes there's only 1 token
        per sequence in the batch.
        """
        assert not self.model_config.enforce_eager
        logger.info("Capturing cudagraphs for decoding. This may lead to "
                    "unexpected consequences if the model is not static. To "
                    "run the model in eager mode, set 'enforce_eager=True' or "
                    "use '--enforce-eager' in the CLI. "
                    "If out-of-memory error occurs during cudagraph capture,"
                    " consider decreasing `gpu_memory_utilization` or "
                    "switching to eager mode. You can also reduce the "
                    "`max_num_seqs` as needed to decrease memory usage.")
        start_time = time.perf_counter()
        start_free_gpu_memory = torch.cuda.mem_get_info()[0]

        # Prepare dummy inputs. These will be reused for all batch sizes.
        max_batch_size = self.max_batchsize_to_capture
        input_tokens = torch.zeros(max_batch_size,
                                   dtype=torch.long,
                                   device=self.device)
        input_positions = torch.zeros(max_batch_size,
                                      dtype=torch.long,
                                      device=self.device)
        inputs_embeds = torch.zeros(
            (max_batch_size, self.model_config.get_hidden_size()),
            dtype=self.model_config.dtype,
            device=self.device)
        if self.model_config.uses_mrope:
            input_positions = torch.tile(input_positions,
                                         (3, 1)).cuda(device=self.device)
        # Prepare dummy previous_hidden_states only if needed by the model.
        # This is used by draft models such as EAGLE.
        previous_hidden_states = None
        if "previous_hidden_states" in inspect.signature(
                self.model.forward).parameters:
            previous_hidden_states = torch.empty(
                [max_batch_size,
                 self.model_config.get_hidden_size()],
                dtype=self.model_config.dtype,
                device=self.device)

        intermediate_inputs = None
        if not get_pp_group().is_first_rank:
            intermediate_inputs = self.model.make_empty_intermediate_tensors(
                batch_size=max_batch_size,
                dtype=self.model_config.dtype,
                device=self.device)

        dummy_lora_id: Optional[int] = None
        dummy_lora_request: LoRARequest = []
        if self.lora_config:
            # The goal is to capture the LoRA kernels in cuda graphs.
            # for this purpose, as single dummy lora is sufficient.
            dummy_lora_requests = self._add_dummy_loras(num_loras=1)
            assert len(dummy_lora_requests) == 1
            dummy_lora_request = dummy_lora_requests[0]
            dummy_lora_id = dummy_lora_request.lora_int_id

        with self.attn_state.graph_capture(max_batch_size), graph_capture(
                self.device) as graph_capture_context:
            # NOTE: Capturing the largest batch size first may help reduce the
            # memory usage of CUDA graph.
            for virtual_engine in range(
                    self.parallel_config.pipeline_parallel_size):
                # We need to not only iterate over batch sizes, but also whether
                # to use inputs_embeds or not, hence we use the cartesian
                # product.
                cudagraph_capture_sizes = self.vllm_config.compilation_config\
                    .cudagraph_capture_sizes
                cudagraph_inputs_embeds = ((
                    True, False) if self.model_config.enable_prompt_embeds else
                                           (False, ))
                compilation_cases = itertools.product(
                    cudagraph_capture_sizes,
                    cudagraph_inputs_embeds,
                )
                # Only rank 0 should print progress bar during capture
                if get_tensor_model_parallel_rank() == 0:
                    compilation_cases = tqdm(
                        list(compilation_cases),
                        desc="Capturing CUDA graph shapes")
                for batch_size, use_inputs_embeds in compilation_cases:
                    attn_metadata = (
                        self.attn_state.graph_capture_get_metadata_for_batch(
                            batch_size,
                            is_encoder_decoder_model=self.model_config.
                            is_encoder_decoder))
                    # Disable KV Scale Calculation for graph capture
                    attn_metadata.enable_kv_scales_calculation = False
                    if self.lora_config:
                        lora_mapping = LoRAMapping(
                            **dict(index_mapping=[dummy_lora_id] * batch_size,
                                   prompt_mapping=[dummy_lora_id] * batch_size,
                                   is_prefill=False))
                        self.set_active_loras(set([dummy_lora_request]),
                                              lora_mapping)

                    if self.prompt_adapter_config:
                        prompt_adapter_mapping = PromptAdapterMapping(
                            [-1] * batch_size,
                            [-1] * batch_size,
                        )
                        self.set_active_prompt_adapters(
                            set(), prompt_adapter_mapping)
                    graph_runner = CUDAGraphRunner(
                        self.model, self.attn_backend.get_name(),
                        self.attn_state.graph_clone(batch_size),
                        self.model_config.is_encoder_decoder)

                    capture_inputs = {
                        "input_ids":
                        input_tokens[:batch_size],
                        "inputs_embeds":
                        inputs_embeds[:batch_size]
                        if use_inputs_embeds else None,
                        "positions":
                        input_positions[..., :batch_size],
                        "intermediate_inputs":
                        intermediate_inputs[:batch_size]
                        if intermediate_inputs is not None else None,
                        "kv_caches":
                        kv_caches[virtual_engine],
                        "attn_metadata":
                        attn_metadata,
                        "memory_pool":
                        self.graph_memory_pool,
                        "stream":
                        graph_capture_context.stream
                    }
                    if previous_hidden_states is not None:
                        capture_inputs[
                            "previous_hidden_states"] = previous_hidden_states[:
                                                                               batch_size]

                    if self.has_inner_state:
                        # Only used by Mamba-based models CUDA graph atm (Jamba)
                        capture_inputs.update({
                            "seqlen_agnostic_capture_inputs":
                            self.model.get_seqlen_agnostic_capture_inputs(
                                batch_size)
                        })
                    if self.model_config.is_encoder_decoder:
                        # add the additional inputs to capture for
                        # encoder-decoder models.
                        self._update_inputs_to_capture_for_enc_dec_model(
                            capture_inputs)

                    with set_forward_context(attn_metadata, self.vllm_config,
                                             virtual_engine):
                        graph_runner.capture(**capture_inputs)
                    self.graph_memory_pool = graph_runner.graph.pool()
                    self.graph_runners[virtual_engine][(
                        batch_size, use_inputs_embeds)] = graph_runner

        if self.lora_config:
            self._remove_dummy_loras()

        end_time = time.perf_counter()
        end_free_gpu_memory = torch.cuda.mem_get_info()[0]
        elapsed_time = end_time - start_time
        cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory
        # This usually takes < 10 seconds.
        logger.info("Graph capturing finished in %.0f secs, took %.2f GiB",
                    elapsed_time, cuda_graph_size / GiB_bytes)

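    # NOTE (editor's annotation, not part of the original file): each captured
    # graph is stored per virtual engine under the key
    # (batch_size, use_inputs_embeds); execute_model() below looks up
    # self.graph_runners[virtual_engine][(graph_batch_size, use_inputs_embeds)]
    # for decode-only batches whose padded size matches a captured shape.
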
    def _update_inputs_to_capture_for_enc_dec_model(self,
                                                    capture_inputs: Dict[str,
                                                                         Any]):
        """
        Updates the set of input tensors needed for CUDA graph capture in an
        encoder-decoder model.

        This method modifies the provided `capture_inputs` dictionary by
        adding tensors specific to encoder-decoder specific models that
        need to be captured for CUDA Graph replay.
        """
        # During the decode phase encoder_input_ids and encoder_positions are
        # unset. Do the same thing for graph capture.
        capture_inputs["encoder_input_ids"] = torch.tensor([],
                                                           dtype=torch.long,
                                                           device=self.device)
        capture_inputs["encoder_positions"] = torch.tensor([],
                                                           dtype=torch.long,
                                                           device=self.device)

    @property
    def vocab_size(self) -> int:
        return self.model_config.get_vocab_size()


class ModelRunner(GPUModelRunnerBase[ModelInputForGPUWithSamplingMetadata]):
|
|
1700
|
+
"""
|
|
1701
|
+
GPU model runner with sampling step.
|
|
1702
|
+
"""
|
|
1703
|
+
_model_input_cls: Type[ModelInputForGPUWithSamplingMetadata] = (
|
|
1704
|
+
ModelInputForGPUWithSamplingMetadata)
|
|
1705
|
+
_builder_cls: Type[ModelInputForGPUBuilder] = ModelInputForGPUBuilder
|
|
1706
|
+
|
|
1707
|
+
def make_model_input_from_broadcasted_tensor_dict(
|
|
1708
|
+
self,
|
|
1709
|
+
tensor_dict: Dict[str, Any],
|
|
1710
|
+
) -> ModelInputForGPUWithSamplingMetadata:
|
|
1711
|
+
model_input = \
|
|
1712
|
+
ModelInputForGPUWithSamplingMetadata.from_broadcasted_tensor_dict(
|
|
1713
|
+
tensor_dict,
|
|
1714
|
+
attn_backend=self.attn_backend,
|
|
1715
|
+
)
|
|
1716
|
+
return model_input
|
|
1717
|
+
|
|
1718
|
+
def prepare_model_input(
|
|
1719
|
+
self,
|
|
1720
|
+
seq_group_metadata_list: List[SequenceGroupMetadata],
|
|
1721
|
+
virtual_engine: int = 0,
|
|
1722
|
+
finished_requests_ids: Optional[List[str]] = None,
|
|
1723
|
+
) -> ModelInputForGPUWithSamplingMetadata:
|
|
1724
|
+
"""Prepare the model input based on a given sequence group, including
|
|
1725
|
+
metadata for the sampling step.
|
|
1726
|
+
|
|
1727
|
+
The API assumes seq_group_metadata_list is sorted by prefill -> decode.
|
|
1728
|
+
|
|
1729
|
+
The result tensors and data structure also batches input in prefill
|
|
1730
|
+
-> decode order. For example,
|
|
1731
|
+
|
|
1732
|
+
- input_tokens[:num_prefill_tokens] contains prefill tokens.
|
|
1733
|
+
- input_tokens[num_prefill_tokens:] contains decode tokens.
|
|
1734
|
+
|
|
1735
|
+
If cuda graph is required, this API automatically pads inputs.
|
|
1736
|
+
"""
|
|
1737
|
+
model_input = self._prepare_model_input_tensors(
|
|
1738
|
+
seq_group_metadata_list, finished_requests_ids)
|
|
1739
|
+
if get_pp_group().is_last_rank:
|
|
1740
|
+
# Sampling metadata is only required for the final pp group
|
|
1741
|
+
generators = self.get_generators(finished_requests_ids)
|
|
1742
|
+
sampling_metadata = SamplingMetadata.prepare(
|
|
1743
|
+
seq_group_metadata_list, model_input.seq_lens,
|
|
1744
|
+
model_input.query_lens, self.device, self.pin_memory,
|
|
1745
|
+
generators, self.sampling_metadata_cache)
|
|
1746
|
+
else:
|
|
1747
|
+
sampling_metadata = None
|
|
1748
|
+
is_prompt = (seq_group_metadata_list[0].is_prompt
|
|
1749
|
+
if seq_group_metadata_list else None)
|
|
1750
|
+
return dataclasses.replace(model_input,
|
|
1751
|
+
sampling_metadata=sampling_metadata,
|
|
1752
|
+
is_prompt=is_prompt,
|
|
1753
|
+
virtual_engine=virtual_engine)

    @torch.inference_mode()
    def execute_model(
        self,
        model_input: ModelInputForGPUWithSamplingMetadata,
        kv_caches: List[torch.Tensor],
        intermediate_tensors: Optional[IntermediateTensors] = None,
        num_steps: int = 1,
        **kwargs,
    ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]:
        if num_steps > 1:
            raise ValueError("num_steps > 1 is not supported in ModelRunner")

        if self.lora_config:
            assert model_input.lora_requests is not None
            assert model_input.lora_mapping is not None
            self.set_active_loras(model_input.lora_requests,
                                  model_input.lora_mapping)

        if self.prompt_adapter_config:
            assert model_input.prompt_adapter_requests is not None
            assert model_input.prompt_adapter_mapping is not None
            self.set_active_prompt_adapters(
                model_input.prompt_adapter_requests,
                model_input.prompt_adapter_mapping)

        self.attn_state.begin_forward(model_input)

        # Currently cuda graph is only supported by the decode phase.
        assert model_input.attn_metadata is not None
        prefill_meta = model_input.attn_metadata.prefill_metadata
        decode_meta = model_input.attn_metadata.decode_metadata
        # TODO(andoorve): We can remove this once all
        # virtual engines share the same kv cache.
        virtual_engine = model_input.virtual_engine
        previous_hidden_states = kwargs.get("previous_hidden_states")
        if prefill_meta is None and decode_meta.use_cuda_graph:
            assert model_input.input_tokens is not None
            graph_batch_size = model_input.input_tokens.shape[0]
            use_inputs_embeds = model_input.inputs_embeds is not None
            model_executable = self.graph_runners[virtual_engine][(
                graph_batch_size, use_inputs_embeds)]
            if previous_hidden_states is not None:
                previous_hidden_states = torch.cat([
                    previous_hidden_states,
                    torch.empty([
                        graph_batch_size - previous_hidden_states.shape[0],
                        *previous_hidden_states.shape[1:]
                    ],
                                dtype=previous_hidden_states.dtype,
                                device=previous_hidden_states.device)
                ])
        else:
            model_executable = self.model

        # Receive KV cache in distributed KV cache transfer setting
        # In disagg prefill setting, it will also recv hidden states and bypass
        # model forwarding
        # In KV cache database setting, it will change the model input so that
        # we can skip prefilling on tokens that successfully received KV caches
        # NOTE: The receive operation is blocking
        bypass_model_exec = False
        if self.need_recv_kv(model_input, kv_caches):
            hidden_or_intermediate_states, bypass_model_exec, model_input = \
                get_kv_transfer_group().recv_kv_caches_and_hidden_states(
                    # model is used to know which layer the current worker
                    # is working on, so that we can receive KV for only those
                    # layers.
                    model_executable,
                    model_input,
                    kv_caches=kv_caches
                )

        multi_modal_kwargs = model_input.multi_modal_kwargs or {}
        seqlen_agnostic_kwargs = {
            "finished_requests_ids": model_input.finished_requests_ids,
            "request_ids_to_seq_ids": model_input.request_ids_to_seq_ids,
        } if self.has_inner_state else {}
        model_kwargs = {}
        if previous_hidden_states is not None:
            model_kwargs["previous_hidden_states"] = previous_hidden_states
        if (self.observability_config is not None
                and self.observability_config.collect_model_forward_time):
            model_forward_start = torch.cuda.Event(enable_timing=True)
            model_forward_end = torch.cuda.Event(enable_timing=True)
            model_forward_start.record()

        if not bypass_model_exec:
            with set_forward_context(model_input.attn_metadata,
                                     self.vllm_config, virtual_engine):
                hidden_or_intermediate_states = model_executable(
                    input_ids=model_input.input_tokens,
                    inputs_embeds=model_input.inputs_embeds,
                    positions=model_input.input_positions,
                    intermediate_tensors=intermediate_tensors,
                    **MultiModalKwargs.as_kwargs(
                        multi_modal_kwargs,
                        device=self.device,
                    ),
                    **seqlen_agnostic_kwargs,
                    **model_kwargs,
                )

        if (self.observability_config is not None
                and self.observability_config.collect_model_forward_time):
            model_forward_end.record()

        # Sending KV cache in distributed KV cache transfer setting
        # NOTE: the send operation is non-blocking
        if self.need_send_kv(model_input, kv_caches):
            get_kv_transfer_group().send_kv_caches_and_hidden_states(
                # model_executable is used to know which layer the current
                # worker is working on, so that we can send KV for only those
                # layers.
                model_executable,
                model_input,
                kv_caches,
                hidden_or_intermediate_states,
            )

        # Compute the logits in the last pipeline stage.
        if not get_pp_group().is_last_rank:
            if (self.is_driver_worker
                    and hidden_or_intermediate_states is not None
                    and isinstance(hidden_or_intermediate_states,
                                   IntermediateTensors)
                    and self.observability_config is not None
                    and self.observability_config.collect_model_forward_time):
                model_forward_end.synchronize()
                model_forward_time = model_forward_start.elapsed_time(
                    model_forward_end)
                orig_model_forward_time = 0.0
                if intermediate_tensors is not None:
                    orig_model_forward_time = intermediate_tensors.tensors.get(
                        "model_forward_time", torch.tensor(0.0)).item()
                hidden_or_intermediate_states.tensors["model_forward_time"] = (
                    torch.tensor(model_forward_time + orig_model_forward_time))
            return hidden_or_intermediate_states

        logits = self.model.compute_logits(hidden_or_intermediate_states,
                                           model_input.sampling_metadata)

        if self.is_driver_worker:
            if model_input.async_callback is not None:
                model_input.async_callback()

            # Sample the next token.
            assert isinstance(self.sampler, Sampler)
            orig_include_gpu_probs = self.sampler.include_gpu_probs_tensor
            if model_input.inputs_embeds is not None:
                self.sampler.include_gpu_probs_tensor = True

            output: SamplerOutput = self.sampler(
                logits=logits,
                sampling_metadata=model_input.sampling_metadata,
            )
            if (self.observability_config is not None
                    and self.observability_config.collect_model_forward_time
                    and output is not None):
                model_forward_end.synchronize()
                model_forward_time = model_forward_start.elapsed_time(
                    model_forward_end)
                orig_model_forward_time = 0.0
                if intermediate_tensors is not None:
                    orig_model_forward_time = intermediate_tensors.tensors.get(
                        "model_forward_time", torch.tensor(0.0)).item()
                # If there are multiple workers, we are still tracking the
                # latency from the start time of the driver worker to the end
                # time of the driver worker. The model forward time will then
                # end up covering the communication time as well.
                output.model_forward_time = (orig_model_forward_time +
                                             model_forward_time)

        if model_input.inputs_embeds is not None:
            if self.is_driver_worker:
                sampled = broadcast_tensor_dict(
                    {"token_ids": output.sampled_token_ids})
            else:
                sampled = broadcast_tensor_dict()
            if sampled["token_ids"] is not None:
                sampled_token_embeds = self.model.get_input_embeddings(
                    sampled["token_ids"].squeeze(1))
                if self.is_driver_worker:
                    self.sampler.include_gpu_probs_tensor = \
                        orig_include_gpu_probs

                    output.sampled_token_embeds = sampled_token_embeds

                    for token_embed, sequence_group_output in zip(
                            output.sampled_token_embeds, output.outputs):
                        assert len(sequence_group_output.samples) == 1
                        sequence_group_output.samples[
                            0].output_embed = token_embed

        if not self.is_driver_worker:
            return []

        if self.return_hidden_states:
            # we only need to pass hidden states of most recent token
            assert model_input.sampling_metadata is not None
            indices = model_input.sampling_metadata.selected_token_indices
            if model_input.is_prompt:
                hidden_states = hidden_or_intermediate_states.index_select(
                    0, indices)
                output.prefill_hidden_states = hidden_or_intermediate_states
            elif decode_meta.use_cuda_graph:
                hidden_states = hidden_or_intermediate_states[:len(indices)]
            else:
                hidden_states = hidden_or_intermediate_states

            output.hidden_states = hidden_states

        return [output]

    def need_recv_kv(self, model_input, kv_caches) -> bool:
        """Check if we need to receive kv-cache from the other worker.
        We need to receive KV when
            1. current vLLM instance is KV cache consumer/decode vLLM instance
            2. this batch is not a profiling run
            3. this batch is a prefill run

        Args:
            model_input: input to the model executable
            kv_caches: vLLM's paged memory
        """

        if self.vllm_config.kv_transfer_config is None:
            return False

        prefill_meta = model_input.attn_metadata.prefill_metadata

        # check if the current run is profiling
        is_profile_run = (kv_caches[0].numel() == 0)
        # check if the current run is prefill
        is_prefill_run = prefill_meta is not None

        return self.vllm_config.kv_transfer_config.is_kv_consumer and (
            not is_profile_run) and is_prefill_run

    def need_send_kv(self, model_input, kv_caches) -> bool:
        """Check if we need to send kv-cache to the other worker.
        We need to send KV when
            1. current vLLM instance is KV cache producer/prefill vLLM instance
            2. this batch is not a profiling run
            3. this batch is a prefill run

        Args:
            model_input: input to the model executable
            kv_caches: vLLM's paged memory
        """

        if self.vllm_config.kv_transfer_config is None:
            return False

        prefill_meta = model_input.attn_metadata.prefill_metadata

        # check if the current run is profiling
        is_profile_run = (kv_caches[0].numel() == 0)
        # check if the current run is prefill
        is_prefill_run = prefill_meta is not None

        return self.vllm_config.kv_transfer_config.is_kv_producer and (
            not is_profile_run) and is_prefill_run
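    # Illustrative sketch of how the two checks above interact in a
    # disaggregated prefill deployment, assuming one instance is configured
    # as KV producer (prefill) and another as KV consumer (decode). On a
    # non-profiling prefill batch:
    #
    #   role of this instance        need_send_kv   need_recv_kv
    #   is_kv_producer (prefill)     True           False
    #   is_kv_consumer (decode)      False          True
    #
    # Both checks return False for decode-only batches and for profiling runs
    # (empty kv_caches), so no KV transfer is attempted in those cases.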


# NOTE: this is nn.Module so the profiler can properly capture/group
# kernel calls made within the graph
class CUDAGraphRunner(nn.Module):

    def __init__(self, model: nn.Module, backend_name: str,
                 attn_state: AttentionState, is_encoder_decoder_model: bool):
        super().__init__()
        self.model = model
        self.backend_name = backend_name
        self.attn_state = attn_state

        self.input_buffers: Dict[str, torch.Tensor] = {}
        self.output_buffers: Dict[str, torch.Tensor] = {}

        self._graph: Optional[torch.cuda.CUDAGraph] = None
        self._is_encoder_decoder_model = is_encoder_decoder_model

    @property
    def graph(self):
        assert self._graph is not None
        return self._graph

    def capture(
        self,
        input_ids: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor],
        positions: torch.Tensor,
        intermediate_inputs: Optional[IntermediateTensors],
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        memory_pool: Optional[Tuple[int, int]],
        stream: torch.cuda.Stream,
        **kwargs,
    ):
        assert self._graph is None
        # Run the model a few times without capturing the graph.
        # This is to make sure that the captured graph does not include the
        # kernel launches for initial benchmarking (e.g., Triton autotune).
        # Note one iteration is not enough for torch.compile
        for _ in range(_NUM_WARMUP_ITERS):
            self.model(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                positions=positions,
                intermediate_tensors=intermediate_inputs,
                **kwargs,
            )
        # Wait for the warm up operations to finish before proceeding with
        # Graph Capture.
        torch.cuda.synchronize()
        # Capture the graph.
        self._graph = torch.cuda.CUDAGraph()
        with torch.cuda.graph(self._graph, pool=memory_pool, stream=stream):
            output_hidden_or_intermediate_states = self.model(
                input_ids=input_ids,
                **({
                    "inputs_embeds": inputs_embeds,
                } if inputs_embeds is not None else {}),
                positions=positions,
                intermediate_tensors=intermediate_inputs,
                **kwargs,
            )

            if isinstance(output_hidden_or_intermediate_states, torch.Tensor):
                hidden_or_intermediate_states = weak_ref_tensor(
                    output_hidden_or_intermediate_states)
            elif isinstance(output_hidden_or_intermediate_states,
                            IntermediateTensors):
                hidden_or_intermediate_states = IntermediateTensors(
                    tensors={
                        key: weak_ref_tensor(value)
                        for key, value in
                        output_hidden_or_intermediate_states.tensors.items()
                    })

            del output_hidden_or_intermediate_states
            # make sure `output_hidden_or_intermediate_states` is deleted
            # in the graph's memory pool
            gc.collect()
        torch.cuda.synchronize()

        # Save the input and output buffers.
        self.input_buffers = {
            "input_ids":
            input_ids,
            **({
                "inputs_embeds": inputs_embeds,
            } if inputs_embeds is not None else {}),
            "positions":
            positions,
            "kv_caches":
            kv_caches,
            **self.attn_state.get_graph_input_buffers(
                attn_metadata, self._is_encoder_decoder_model),
            **kwargs,
        }
        if intermediate_inputs is not None:
            self.input_buffers.update(intermediate_inputs.tensors)
        if get_pp_group().is_last_rank:
            self.output_buffers = {
                "hidden_states": hidden_or_intermediate_states
            }
        else:
            self.output_buffers = hidden_or_intermediate_states

    def forward(
        self,
        input_ids: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor],
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors],
        **kwargs,
    ) -> torch.Tensor:
        attn_metadata: AttentionMetadata = get_forward_context().attn_metadata

        # Copy the input tensors to the input buffers.
        self.input_buffers["input_ids"].copy_(input_ids, non_blocking=True)
        if positions is not None:
            # In some cases (e.g. MLA) the positions from the metadata are
            # reused but truncated to the original size, so the shape is not
            # padded; copy only the valid prefix.
            self.input_buffers["positions"][:positions.shape[0]].copy_(
                positions, non_blocking=True)
        if inputs_embeds is not None:
            self.input_buffers["inputs_embeds"][:inputs_embeds.shape[0]].copy_(
                inputs_embeds, non_blocking=True)

        if self.backend_name != "NO_ATTENTION":
            self.input_buffers["slot_mapping"].copy_(
                attn_metadata.slot_mapping, non_blocking=True)

        self.attn_state.prepare_graph_input_buffers(
            self.input_buffers, attn_metadata, self._is_encoder_decoder_model)

        if "seqlen_agnostic_capture_inputs" in self.input_buffers:
            self.model.copy_inputs_before_cuda_graphs(self.input_buffers,
                                                      **kwargs)

        if "previous_hidden_states" in self.input_buffers:
            self.input_buffers["previous_hidden_states"].copy_(
                kwargs["previous_hidden_states"], non_blocking=True)

        if intermediate_tensors is not None:
            for key in intermediate_tensors.tensors:
                if key != "model_execute_time" and key != "model_forward_time":
                    self.input_buffers[key].copy_(intermediate_tensors[key],
                                                  non_blocking=True)
        if self._is_encoder_decoder_model:
            self.input_buffers["encoder_input_ids"].copy_(
                kwargs['encoder_input_ids'], non_blocking=True)
            self.input_buffers["encoder_positions"].copy_(
                kwargs['encoder_positions'], non_blocking=True)

        # Run the graph.
        self.graph.replay()
        # Return the output tensor.
        if get_pp_group().is_last_rank:
            return self.output_buffers["hidden_states"]

        return self.output_buffers
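

# Illustrative sketch of the typical CUDAGraphRunner lifecycle, assuming the
# caller has already allocated static input tensors, kv_caches and
# attn_metadata (as GPUModelRunnerBase.capture_model does above); the variable
# names are placeholders and only the call structure mirrors the class:
#
#   runner = CUDAGraphRunner(model, attn_backend_name, attn_state,
#                            is_encoder_decoder_model=False)
#   runner.capture(input_ids=input_ids, inputs_embeds=None,
#                  positions=positions, intermediate_inputs=None,
#                  kv_caches=kv_caches, attn_metadata=attn_metadata,
#                  memory_pool=None, stream=torch.cuda.Stream())
#   # At decode time (under set_forward_context, as in
#   # ModelRunner.execute_model), forward() copies fresh values into the
#   # captured input buffers and replays the recorded kernels:
#   hidden_states = runner(input_ids, None, positions, None)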