vllm-cpu-amxbf16 0.9.1 cp312-cp312-manylinux_2_17_x86_64.whl
This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
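Since a wheel is just a zip archive, a listing like the one below can be reproduced locally. The sketch beneath is a minimal, hypothetical example using only the Python standard library; the wheel path is an assumption, and the line counts are approximated by counting newlines, which is close to but not necessarily identical to how the diff service computes its "+N -0" columns.

```python
# Hypothetical sketch: enumerate the files in a local wheel and print an
# additions-only listing similar to the diff below. Uses only the stdlib.
import zipfile

# Assumed local filename; any .whl (a zip archive) will work.
WHEEL = "vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as wf:
    for name in sorted(wf.namelist()):
        if not name.startswith("vllm/"):
            continue  # skip *.dist-info metadata to mirror the listing below
        data = wf.read(name)
        try:
            # Text files: approximate the "+N" column by counting newlines.
            added = data.decode("utf-8").count("\n")
        except UnicodeDecodeError:
            # Binary artifacts (e.g. vllm/_C.abi3.so) show up as "+0 -0".
            added = 0
        print(f"- {name} +{added} -0")
```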
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +53 -0
- vllm/_custom_ops.py +1828 -0
- vllm/_ipex_ops.py +244 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +115 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +308 -0
- vllm/attention/backends/blocksparse_attn.py +461 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
- vllm/attention/backends/flash_attn.py +1003 -0
- vllm/attention/backends/flashinfer.py +1104 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +313 -0
- vllm/attention/backends/ipex_attn.py +398 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1385 -0
- vllm/attention/backends/pallas.py +351 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +975 -0
- vllm/attention/backends/torch_sdpa.py +703 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +802 -0
- vllm/attention/layer.py +468 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +906 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +979 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +334 -0
- vllm/attention/selector.py +187 -0
- vllm/attention/utils/fa_utils.py +55 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1185 -0
- vllm/benchmarks/endpoint_request_func.py +381 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1135 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +563 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +544 -0
- vllm/compilation/counter.py +38 -0
- vllm/compilation/cuda_piecewise_backend.py +214 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +618 -0
- vllm/compilation/fx_utils.py +62 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +137 -0
- vllm/compilation/pass_manager.py +78 -0
- vllm/compilation/sequence_parallelism.py +268 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +67 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4746 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +521 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +135 -0
- vllm/core/placeholder_block_space_manager.py +100 -0
- vllm/core/scheduler.py +2093 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +176 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1296 -0
- vllm/distributed/tpu_distributed_utils.py +177 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1708 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2097 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +317 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1299 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +39 -0
- vllm/entrypoints/cli/benchmark/latency.py +30 -0
- vllm/entrypoints/cli/benchmark/main.py +54 -0
- vllm/entrypoints/cli/benchmark/serve.py +30 -0
- vllm/entrypoints/cli/benchmark/throughput.py +30 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +65 -0
- vllm/entrypoints/cli/openai.py +205 -0
- vllm/entrypoints/cli/run_batch.py +62 -0
- vllm/entrypoints/cli/serve.py +328 -0
- vllm/entrypoints/cli/types.py +25 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1544 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1387 -0
- vllm/entrypoints/openai/cli_args.py +315 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +1913 -0
- vllm/entrypoints/openai/run_batch.py +463 -0
- vllm/entrypoints/openai/serving_chat.py +1221 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +592 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +986 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +232 -0
- vllm/entrypoints/openai/serving_score.py +433 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +424 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +944 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +179 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +909 -0
- vllm/inputs/registry.py +237 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +85 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +259 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +152 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +369 -0
- vllm/model_executor/layers/fused_moe/__init__.py +54 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
- vllm/model_executor/layers/fused_moe/layer.py +1535 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/utils.py +98 -0
- vllm/model_executor/layers/layernorm.py +288 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1524 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +350 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +194 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +151 -0
- vllm/model_executor/layers/quantization/bitblas.py +461 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +196 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +906 -0
- vllm/model_executor/layers/quantization/gguf.py +565 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +261 -0
- vllm/model_executor/layers/quantization/modelopt.py +737 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +441 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +161 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +1862 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +95 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +76 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +600 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
- vllm/model_executor/model_loader/tpu.py +112 -0
- vllm/model_executor/model_loader/utils.py +302 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +28 -0
- vllm/model_executor/models/adapters.py +248 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +657 -0
- vllm/model_executor/models/aya_vision.py +466 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +543 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +523 -0
- vllm/model_executor/models/bert_with_rope.py +769 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +718 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1136 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +472 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +269 -0
- vllm/model_executor/models/deepseek_v2.py +843 -0
- vllm/model_executor/models/deepseek_vl2.py +648 -0
- vllm/model_executor/models/eagle.py +260 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/florence2.py +1103 -0
- vllm/model_executor/models/fuyu.py +389 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +533 -0
- vllm/model_executor/models/gemma3_mm.py +709 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4v.py +648 -0
- vllm/model_executor/models/gpt2.py +328 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +779 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +586 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +546 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +776 -0
- vllm/model_executor/models/interfaces.py +572 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1418 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +532 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +866 -0
- vllm/model_executor/models/llava_next.py +586 -0
- vllm/model_executor/models/llava_next_video.py +471 -0
- vllm/model_executor/models/llava_onevision.py +956 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +308 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +759 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +364 -0
- vllm/model_executor/models/mistral3.py +604 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1624 -0
- vllm/model_executor/models/mllama4.py +938 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +331 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1568 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +573 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +567 -0
- vllm/model_executor/models/paligemma.py +398 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +723 -0
- vllm/model_executor/models/phi4mm.py +1246 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +665 -0
- vllm/model_executor/models/pixtral.py +1316 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +497 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
- vllm/model_executor/models/qwen2_5_vl.py +1166 -0
- vllm/model_executor/models/qwen2_audio.py +410 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +132 -0
- vllm/model_executor/models/qwen2_vl.py +1405 -0
- vllm/model_executor/models/qwen3.py +321 -0
- vllm/model_executor/models/qwen3_moe.py +535 -0
- vllm/model_executor/models/qwen_vl.py +785 -0
- vllm/model_executor/models/registry.py +622 -0
- vllm/model_executor/models/roberta.py +276 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +643 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +508 -0
- vllm/model_executor/models/ultravox.py +656 -0
- vllm/model_executor/models/utils.py +731 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +747 -0
- vllm/model_executor/models/zamba2.py +1009 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +77 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +106 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +118 -0
- vllm/multimodal/image.py +97 -0
- vllm/multimodal/inputs.py +876 -0
- vllm/multimodal/parse.py +461 -0
- vllm/multimodal/processing.py +1895 -0
- vllm/multimodal/profiling.py +258 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +436 -0
- vllm/multimodal/video.py +198 -0
- vllm/outputs.py +512 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +266 -0
- vllm/platforms/cuda.py +526 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +538 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +435 -0
- vllm/platforms/tpu.py +216 -0
- vllm/platforms/xpu.py +156 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +54 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +887 -0
- vllm/transformers_utils/configs/__init__.py +61 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/h2ovl.py +16 -0
- vllm/transformers_utils/configs/internvl.py +54 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +258 -0
- vllm/transformers_utils/configs/nvlm_d.py +15 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +50 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +256 -0
- vllm/utils.py +2910 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +163 -0
- vllm/v1/attention/backends/flash_attn.py +869 -0
- vllm/v1/attention/backends/flashinfer.py +651 -0
- vllm/v1/attention/backends/flex_attention.py +477 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +931 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
- vllm/v1/attention/backends/mla/flashmla.py +152 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
- vllm/v1/attention/backends/mla/triton_mla.py +120 -0
- vllm/v1/attention/backends/pallas.py +240 -0
- vllm/v1/attention/backends/triton_attn.py +285 -0
- vllm/v1/attention/backends/utils.py +52 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +150 -0
- vllm/v1/core/kv_cache_coordinator.py +363 -0
- vllm/v1/core/kv_cache_manager.py +392 -0
- vllm/v1/core/kv_cache_utils.py +996 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +154 -0
- vllm/v1/core/sched/scheduler.py +1044 -0
- vllm/v1/core/sched/utils.py +23 -0
- vllm/v1/core/single_type_kv_cache_manager.py +403 -0
- vllm/v1/engine/__init__.py +173 -0
- vllm/v1/engine/async_llm.py +558 -0
- vllm/v1/engine/coordinator.py +253 -0
- vllm/v1/engine/core.py +961 -0
- vllm/v1/engine/core_client.py +1129 -0
- vllm/v1/engine/detokenizer.py +261 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +317 -0
- vllm/v1/engine/logprobs.py +199 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +428 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +407 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +537 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +194 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +523 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +239 -0
- vllm/v1/outputs.py +116 -0
- vllm/v1/request.py +193 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +59 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +286 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +432 -0
- vllm/v1/spec_decode/medusa.py +62 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +46 -0
- vllm/v1/structured_output/__init__.py +222 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +743 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +86 -0
- vllm/v1/worker/cpu_worker.py +152 -0
- vllm/v1/worker/gpu_input_batch.py +681 -0
- vllm/v1/worker/gpu_model_runner.py +2320 -0
- vllm/v1/worker/gpu_worker.py +393 -0
- vllm/v1/worker/lora_model_runner_mixin.py +173 -0
- vllm/v1/worker/tpu_model_runner.py +1673 -0
- vllm/v1/worker/tpu_worker.py +299 -0
- vllm/v1/worker/utils.py +111 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +450 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1673 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
import bisect
|
|
4
|
+
import gc
|
|
5
|
+
import time
|
|
6
|
+
from typing import TYPE_CHECKING, Optional, cast
|
|
7
|
+
from unittest.mock import patch
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
import torch
|
|
11
|
+
import torch.nn as nn
|
|
12
|
+
# TPU XLA related
|
|
13
|
+
import torch_xla.core.xla_model as xm
|
|
14
|
+
import torch_xla.distributed.spmd as xs
|
|
15
|
+
import torch_xla.runtime as xr
|
|
16
|
+
|
|
17
|
+
import vllm.envs as envs
|
|
18
|
+
from vllm.attention.backends.abstract import AttentionType
|
|
19
|
+
from vllm.attention.layer import Attention
|
|
20
|
+
from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
|
|
21
|
+
from vllm.config import ParallelConfig, VllmConfig, get_layers_from_vllm_config
|
|
22
|
+
from vllm.forward_context import set_forward_context
|
|
23
|
+
from vllm.logger import init_logger
|
|
24
|
+
from vllm.lora.layers import BaseLayerWithLoRA
|
|
25
|
+
from vllm.model_executor.model_loader import get_model_loader
|
|
26
|
+
from vllm.model_executor.model_loader.tpu import TPUModelLoader
|
|
27
|
+
from vllm.multimodal import MULTIMODAL_REGISTRY
|
|
28
|
+
from vllm.multimodal.inputs import (BatchedTensorInputs, MultiModalKwargs,
|
|
29
|
+
PlaceholderRange)
|
|
30
|
+
from vllm.multimodal.utils import group_mm_inputs_by_modality
|
|
31
|
+
from vllm.sequence import IntermediateTensors
|
|
32
|
+
from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, cdiv,
|
|
33
|
+
is_pin_memory_available)
|
|
34
|
+
from vllm.v1.attention.backends.pallas import (PallasAttentionBackend,
|
|
35
|
+
PallasMetadata)
|
|
36
|
+
from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
|
|
37
|
+
from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
|
|
38
|
+
KVCacheConfig, KVCacheSpec,
|
|
39
|
+
SlidingWindowSpec)
|
|
40
|
+
from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors,
|
|
41
|
+
ModelRunnerOutput)
|
|
42
|
+
from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
|
|
43
|
+
from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
|
|
44
|
+
from vllm.v1.utils import bind_kv_cache
|
|
45
|
+
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
|
|
46
|
+
from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
|
|
47
|
+
|
|
48
|
+
from .utils import (initialize_kv_cache_for_kv_sharing,
|
|
49
|
+
sanity_check_mm_encoder_outputs)
|
|
50
|
+
|
|
51
|
+
if TYPE_CHECKING:
|
|
52
|
+
from vllm.v1.core.sched.output import SchedulerOutput
|
|
53
|
+
|
|
54
|
+
logger = init_logger(__name__)
|
|
55
|
+
|
|
56
|
+
# Here we utilize the behavior that out-of-bound index is ignored.
|
|
57
|
+
# FIXME(woosuk): Find a more reliable way to prevent possible bugs.
|
|
58
|
+
_PAD_SLOT_ID = 1_000_000_000
|
|
59
|
+
INVALID_TOKEN_ID = -1
|
|
60
|
+
# Smallest output size
|
|
61
|
+
MIN_NUM_SEQS = 8
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
#########################################################
|
|
65
|
+
# Ways to avoid recompilation
|
|
66
|
+
#########################################################
|
|
67
|
+
#
|
|
68
|
+
# The model executor has two primary components:
|
|
69
|
+
# 1. preparing the model and sampler inputs
|
|
70
|
+
# 2. executing the model and sampler.
|
|
71
|
+
# The core idea is to avoid any TPU computation during input preparation. For
|
|
72
|
+
# better compilation tracking and increased flexibility, the model execution and
|
|
73
|
+
# sampler are divided into several distinct components.
|
|
74
|
+
#
|
|
75
|
+
# Below are the detailed steps:
|
|
76
|
+
#
|
|
77
|
+
# Step 1
|
|
78
|
+
# It is recommended to avoid TPU operations when preparing the model and sampler
|
|
79
|
+
# inputs. CPU tensors can be prepared and transferred to the XLA device using
|
|
80
|
+
# cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
|
|
81
|
+
# compilation.
|
|
82
|
+
#
|
|
83
|
+
# Step 2
|
|
84
|
+
# The TPU execution should be decomposed into subgraphs (4 at the moment):
|
|
85
|
+
# 1. the main model
|
|
86
|
+
# 2. selecting hidden states for each request
|
|
87
|
+
# 3. sampler
|
|
88
|
+
# 4. encoder.
|
|
89
|
+
# Each subgraph should be decorated in a torch.compile. This is used to make
|
|
90
|
+
# sure that we have the same subgraph topology in both dummy_run and
|
|
91
|
+
# xecute_model. The results from these subgraphs should either be passed to
|
|
92
|
+
# other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
|
|
93
|
+
# subsequent processing on the CPU.
|
|
94
|
+
#
|
|
95
|
+
# Step 3
|
|
96
|
+
# The dummy_run should be comprehensive, ensuring all potential input shapes and
|
|
97
|
+
# branch predictions are included as subgraph inputs to facilitate
|
|
98
|
+
# pre-compilation.
|
|
99
|
+
class TPUModelRunner(LoRAModelRunnerMixin):
|
|
100
|
+
|
|
101
|
+
def __init__(
|
|
102
|
+
self,
|
|
103
|
+
vllm_config: VllmConfig,
|
|
104
|
+
device: torch.device,
|
|
105
|
+
original_parallel_config: Optional[ParallelConfig] = None,
|
|
106
|
+
):
|
|
107
|
+
self.vllm_config = vllm_config
|
|
108
|
+
self.model_config = vllm_config.model_config
|
|
109
|
+
self.cache_config = vllm_config.cache_config
|
|
110
|
+
self.lora_config = vllm_config.lora_config
|
|
111
|
+
self.load_config = vllm_config.load_config
|
|
112
|
+
self.parallel_config = vllm_config.parallel_config
|
|
113
|
+
self.original_parallel_config = original_parallel_config
|
|
114
|
+
self.scheduler_config = vllm_config.scheduler_config
|
|
115
|
+
self.speculative_config = vllm_config.speculative_config
|
|
116
|
+
self.prompt_adapter_config = vllm_config.prompt_adapter_config
|
|
117
|
+
self.observability_config = vllm_config.observability_config
|
|
118
|
+
self.device_config = vllm_config.device_config
|
|
119
|
+
|
|
120
|
+
model_config = self.model_config
|
|
121
|
+
cache_config = self.cache_config
|
|
122
|
+
scheduler_config = self.scheduler_config
|
|
123
|
+
parallel_config = self.parallel_config
|
|
124
|
+
self.device = device
|
|
125
|
+
self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION
|
|
126
|
+
|
|
127
|
+
# SPMD Related
|
|
128
|
+
self.use_spmd = envs.VLLM_XLA_USE_SPMD
|
|
129
|
+
if self.use_spmd:
|
|
130
|
+
num_devices = xr.global_runtime_device_count()
|
|
131
|
+
mesh_shape = (num_devices, 1)
|
|
132
|
+
device_ids = np.array(range(num_devices))
|
|
133
|
+
self.mesh = xs.Mesh(device_ids, mesh_shape, ('x', 'y'))
|
|
134
|
+
|
|
135
|
+
self.enforce_eager = model_config.enforce_eager
|
|
136
|
+
|
|
137
|
+
self.num_xla_graphs = 0
|
|
138
|
+
self._update_num_xla_graphs("init")
|
|
139
|
+
|
|
140
|
+
self.pin_memory = is_pin_memory_available()
|
|
141
|
+
self.dtype = self.model_config.dtype
|
|
142
|
+
if cache_config.cache_dtype == "auto":
|
|
143
|
+
self.kv_cache_dtype = self.dtype
|
|
144
|
+
else:
|
|
145
|
+
self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
|
|
146
|
+
cache_config.cache_dtype]
|
|
147
|
+
self._hidden_states_dtype = self.dtype
|
|
148
|
+
|
|
149
|
+
self.is_multimodal_model = model_config.is_multimodal_model
|
|
150
|
+
self.sliding_window = model_config.get_sliding_window()
|
|
151
|
+
self.block_size = cache_config.block_size
|
|
152
|
+
self.max_model_len = model_config.max_model_len
|
|
153
|
+
self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
|
|
154
|
+
# InputBatch needs to work with sampling tensors greater than padding
|
|
155
|
+
# to avoid dynamic shapes. Also, avoid suboptimal alignment.
|
|
156
|
+
self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
|
|
157
|
+
self.num_tokens_paddings = _get_token_paddings(
|
|
158
|
+
min_token_size=16,
|
|
159
|
+
max_token_size=scheduler_config.max_num_batched_tokens,
|
|
160
|
+
padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP)
|
|
161
|
+
# In case `max_num_tokens < max(num_tokens_paddings)` use the actual
|
|
162
|
+
# padded max value to pre-allocate data structures and pre-compile.
|
|
163
|
+
self.max_num_tokens = self.num_tokens_paddings[-1]
|
|
164
|
+
|
|
165
|
+
# Model-related.
|
|
166
|
+
self.num_attn_layers = model_config.get_num_layers_by_block_type(
|
|
167
|
+
parallel_config, LayerBlockType.attention)
|
|
168
|
+
self.num_query_heads = model_config.get_num_attention_heads(
|
|
169
|
+
parallel_config)
|
|
170
|
+
self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
|
|
171
|
+
self.head_size = model_config.get_head_size()
|
|
172
|
+
self.hidden_size = model_config.get_hidden_size()
|
|
173
|
+
self.vocab_size = model_config.get_vocab_size()
|
|
174
|
+
|
|
175
|
+
if self.lora_config is not None:
|
|
176
|
+
self.vocab_size += self.lora_config.lora_extra_vocab_size
|
|
177
|
+
|
|
178
|
+
# Multi-modal data support
|
|
179
|
+
self.mm_registry = MULTIMODAL_REGISTRY
|
|
180
|
+
self.uses_mrope = model_config.uses_mrope
|
|
181
|
+
# TODO: Support M-RoPE (e.g, Qwen2-VL)
|
|
182
|
+
assert not self.uses_mrope, "TPU does not support M-RoPE yet."
|
|
183
|
+
|
|
184
|
+
encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
|
|
185
|
+
model_config=model_config,
|
|
186
|
+
scheduler_config=scheduler_config,
|
|
187
|
+
mm_registry=self.mm_registry,
|
|
188
|
+
)
|
|
189
|
+
self.max_num_encoder_input_tokens = encoder_compute_budget
|
|
190
|
+
self.encoder_cache_size = encoder_cache_size
|
|
191
|
+
|
|
192
|
+
# Lazy initialization
|
|
193
|
+
self.model: nn.Module # Set after load_model
|
|
194
|
+
self.kv_caches: list[torch.Tensor] = []
|
|
195
|
+
# req_id -> (input_id -> encoder_output)
|
|
196
|
+
self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
|
|
197
|
+
|
|
198
|
+
# Request states.
|
|
199
|
+
self.requests: dict[str, CachedRequestState] = {}
|
|
200
|
+
|
|
201
|
+
# Initialize input batch early to avoid AttributeError in _update_states
|
|
202
|
+
self.input_batch = InputBatch(
|
|
203
|
+
max_num_reqs=self.max_num_reqs,
|
|
204
|
+
max_model_len=self.max_model_len,
|
|
205
|
+
max_num_batched_tokens=self.max_num_tokens,
|
|
206
|
+
device=self.device,
|
|
207
|
+
pin_memory=self.pin_memory,
|
|
208
|
+
vocab_size=self.model_config.get_vocab_size(),
|
|
209
|
+
block_sizes=[self.block_size],
|
|
210
|
+
)
|
|
211
|
+
|
|
212
|
+
# Cached torch/numpy tensor
|
|
213
|
+
# The pytorch tensor and numpy array share the same buffer.
|
|
214
|
+
# Sometimes the numpy op is faster so we create both.
|
|
215
|
+
self.input_ids_cpu = torch.zeros(self.max_num_tokens,
|
|
216
|
+
dtype=torch.int32,
|
|
217
|
+
device="cpu")
|
|
218
|
+
|
|
219
|
+
self.positions_cpu = torch.zeros(self.max_num_tokens,
|
|
220
|
+
dtype=torch.int32,
|
|
221
|
+
device="cpu")
|
|
222
|
+
self.positions_np = self.positions_cpu.numpy()
|
|
223
|
+
|
|
224
|
+
self.block_table_cpu = torch.zeros(
|
|
225
|
+
(self.max_num_reqs, self.max_num_blocks_per_req),
|
|
226
|
+
dtype=torch.int32,
|
|
227
|
+
device="cpu")
|
|
228
|
+
|
|
229
|
+
self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1,
|
|
230
|
+
dtype=torch.int32,
|
|
231
|
+
device="cpu",
|
|
232
|
+
pin_memory=self.pin_memory)
|
|
233
|
+
self.query_start_loc_np = self.query_start_loc_cpu.numpy()
|
|
234
|
+
|
|
235
|
+
self.seq_lens_cpu = torch.zeros(self.max_num_tokens,
|
|
236
|
+
dtype=torch.int32,
|
|
237
|
+
device="cpu",
|
|
238
|
+
pin_memory=self.pin_memory)
|
|
239
|
+
self.seq_lens_np = self.seq_lens_cpu.numpy()
|
|
240
|
+
|
|
241
|
+
# Range tensor with values [0 .. self.max_num_tokens - 1].
|
|
242
|
+
# Used to initialize positions / context_lens / seq_lens
|
|
243
|
+
# Keep in int64 to avoid overflow with long context
|
|
244
|
+
self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
|
|
245
|
+
self.num_reqs_paddings = _get_req_paddings(
|
|
246
|
+
min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs)
|
|
247
|
+
|
|
248
|
+
# Layer pairings for cross-layer KV sharing.
|
|
249
|
+
# If an Attention layer `layer_name` is in the keys of this dict, it
|
|
250
|
+
# means this layer will perform attention using the keys and values
|
|
251
|
+
# from the KV cache of `shared_kv_cache_layers[layer_name]`.
|
|
252
|
+
self.shared_kv_cache_layers: dict[str, str] = {}
|
|
253
|
+
|
|
254
|
+
# tensors for structured decoding
|
|
255
|
+
self.grammar_bitmask_cpu = torch.zeros(
|
|
256
|
+
(self.max_num_reqs, cdiv(self.vocab_size, 32)),
|
|
257
|
+
dtype=torch.int32,
|
|
258
|
+
device="cpu",
|
|
259
|
+
pin_memory=self.pin_memory)
|
|
260
|
+
self.require_structured_out_cpu = torch.zeros(
|
|
261
|
+
(self.max_num_reqs, 1),
|
|
262
|
+
dtype=torch.bool,
|
|
263
|
+
device="cpu",
|
|
264
|
+
pin_memory=self.pin_memory)
|
|
265
|
+
self.structured_decode_arange = torch.arange(
|
|
266
|
+
0, 32, device="cpu", pin_memory=self.pin_memory)
|
|
267
|
+
|
|
268
|
+
# Get maximum number of mm items per modality (batch size).
|
|
269
|
+
self.max_num_mm_items_by_modality = dict()
|
|
270
|
+
if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
|
|
271
|
+
and self.encoder_cache_size > 0):
|
|
272
|
+
max_tokens_by_modality_dict = (
|
|
273
|
+
MULTIMODAL_REGISTRY.
|
|
274
|
+
get_max_tokens_per_item_by_nonzero_modality(self.model_config))
|
|
275
|
+
for modality, max_tokens in max_tokens_by_modality_dict.items():
|
|
276
|
+
# Check how many items of this modality can be supported by
|
|
277
|
+
# the encoder budget.
|
|
278
|
+
encoder_budget = min(self.max_num_encoder_input_tokens,
|
|
279
|
+
self.encoder_cache_size)
|
|
280
|
+
|
|
281
|
+
max_num_mm_items_encoder_budget = cdiv(encoder_budget,
|
|
282
|
+
max_tokens)
|
|
283
|
+
|
|
284
|
+
# Check how many items of this modality can be supported by
|
|
285
|
+
# the decoder budget.
|
|
286
|
+
max_mm_items_per_req = self.mm_registry.\
|
|
287
|
+
get_mm_limits_per_prompt(self.model_config)[modality]
|
|
288
|
+
|
|
289
|
+
# NOTE: We do not consider max_num_batched_tokens on purpose
|
|
290
|
+
# because the multimodal embeddings can be generated in advance
|
|
291
|
+
# and chunked prefilled.
|
|
292
|
+
max_num_mm_items_decoder_budget = self.max_num_reqs * \
|
|
293
|
+
max_mm_items_per_req
|
|
294
|
+
|
|
295
|
+
max_num_mm_items = min(max_num_mm_items_encoder_budget,
|
|
296
|
+
max_num_mm_items_decoder_budget)
|
|
297
|
+
self.max_num_mm_items_by_modality[modality] = max_num_mm_items
|
|
298
|
+
|
|
299
|
+
if not self.use_spmd:
|
|
300
|
+
self.sample_from_logits_func = torch.compile(
|
|
301
|
+
self.sample_from_logits,
|
|
302
|
+
backend="openxla",
|
|
303
|
+
fullgraph=True,
|
|
304
|
+
dynamic=False)
|
|
305
|
+
else:
|
|
306
|
+
self.sample_from_logits_func = self.sample_from_logits
|
|
307
|
+
|
|
308
|
+
def _update_num_xla_graphs(self, case_str):
|
|
309
|
+
check_comp = self.check_recompilation and not self.enforce_eager
|
|
310
|
+
if not check_comp:
|
|
311
|
+
return
|
|
312
|
+
|
|
313
|
+
total_cached_graphs = xr.get_num_cached_compilation_graph()
|
|
314
|
+
new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
|
|
315
|
+
if new_compiled_graphs == 0:
|
|
316
|
+
return
|
|
317
|
+
|
|
318
|
+
logger.info("Add new %d compiled XLA graphs due to %s",
|
|
319
|
+
new_compiled_graphs, case_str)
|
|
320
|
+
self.num_xla_graphs += new_compiled_graphs
|
|
321
|
+
|
|
322
|
+
def _verify_num_xla_graphs(self, case_str):
|
|
323
|
+
check_comp = self.check_recompilation and not self.enforce_eager
|
|
324
|
+
if not check_comp:
|
|
325
|
+
return
|
|
326
|
+
|
|
327
|
+
curr_cached_graph = xr.get_num_cached_compilation_graph()
|
|
328
|
+
assert self.num_xla_graphs == curr_cached_graph, (
|
|
329
|
+
"Recompilation after warm up is detected during {}."
|
|
330
|
+
" num_xla_graphs = {} curr_cached_graph = {}".format(
|
|
331
|
+
case_str, self.num_xla_graphs, curr_cached_graph))
|
|
332
|
+
|
|
333
|
+
def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
|
|
334
|
+
"""Update the cached states and the persistent batch with the scheduler
|
|
335
|
+
output.
|
|
336
|
+
|
|
337
|
+
The updated states are used by the `_prepare_inputs` function to create
|
|
338
|
+
the input GPU tensors for the model.
|
|
339
|
+
|
|
340
|
+
Returns:
|
|
341
|
+
True if there is a new/resumed/paused/finished request.
|
|
342
|
+
If False, we can skip copying SamplingMetadata to the GPU.
|
|
343
|
+
"""
|
|
344
|
+
# Remove finished requests from the cached states.
|
|
345
|
+
for req_id in scheduler_output.finished_req_ids:
|
|
346
|
+
self.requests.pop(req_id, None)
|
|
347
|
+
self.encoder_cache.pop(req_id, None)
|
|
348
|
+
|
|
349
|
+
# Remove the finished requests from the persistent batch.
|
|
350
|
+
# NOTE(woosuk): There could be an edge case where finished_req_ids and
|
|
351
|
+
# scheduled_req_ids overlap. This happens when a request is aborted and
|
|
352
|
+
# then resubmitted with the same ID. In this case, we treat them as two
|
|
353
|
+
# distinct requests - clearing the cached states for the first request
|
|
354
|
+
# and handling the second as a new request.
|
|
355
|
+
removed_req_indices: list[int] = []
|
|
356
|
+
for req_id in scheduler_output.finished_req_ids:
|
|
357
|
+
req_index = self.input_batch.remove_request(req_id)
|
|
358
|
+
if req_index is not None:
|
|
359
|
+
removed_req_indices.append(req_index)
|
|
360
|
+
|
|
361
|
+
# Free the cached encoder outputs.
|
|
362
|
+
for req_id, input_id in scheduler_output.free_encoder_input_ids:
|
|
363
|
+
encoder_outputs = self.encoder_cache.get(req_id)
|
|
364
|
+
if encoder_outputs is not None:
|
|
365
|
+
encoder_outputs.pop(input_id, None)
|
|
366
|
+
if not encoder_outputs:
|
|
367
|
+
self.encoder_cache.pop(req_id, None)
|
|
368
|
+
|
|
369
|
+
# Remove the unscheduled requests from the persistent batch.
|
|
370
|
+
# NOTE(woosuk): The unscheduled requests are either preempted requests
|
|
371
|
+
# or running requests that are not scheduled in this step. We remove
|
|
372
|
+
# them from the persistent batch but keep their cached states since
|
|
373
|
+
# they will be scheduled again sometime in the future.
|
|
374
|
+
scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
|
|
375
|
+
cached_req_ids = self.input_batch.req_id_to_index.keys()
|
|
376
|
+
unscheduled_req_ids = cached_req_ids - scheduled_req_ids
|
|
377
|
+
# NOTE(woosuk): The persistent batch optimization assumes that
|
|
378
|
+
# consecutive batches contain mostly the same requests. If batches
|
|
379
|
+
# have low request overlap (e.g., alternating between two distinct
|
|
380
|
+
# sets of requests), this optimization becomes very inefficient.
|
|
381
|
+
for req_id in unscheduled_req_ids:
|
|
382
|
+
req_index = self.input_batch.remove_request(req_id)
|
|
383
|
+
assert req_index is not None
|
|
384
|
+
removed_req_indices.append(req_index)
|
|
385
|
+
|
|
386
|
+
req_ids_to_add: list[str] = []
|
|
387
|
+
        # Add new requests to the cached states.
        for new_req_data in scheduler_output.scheduled_new_reqs:
            req_id = new_req_data.req_id
            sampling_params = new_req_data.sampling_params

            self.requests[req_id] = CachedRequestState(
                req_id=req_id,
                prompt_token_ids=new_req_data.prompt_token_ids,
                mm_inputs=new_req_data.mm_inputs,
                mm_positions=new_req_data.mm_positions,
                sampling_params=sampling_params,
                generator=None,
                block_ids=new_req_data.block_ids,
                num_computed_tokens=new_req_data.num_computed_tokens,
                output_token_ids=[],
                lora_request=new_req_data.lora_request,
            )

            req_ids_to_add.append(req_id)

        # Update the states of the running/resumed requests.
        for req_data in scheduler_output.scheduled_cached_reqs:
            req_id = req_data.req_id
            req_state = self.requests[req_id]

            # Update the cached states.
            req_state.num_computed_tokens = req_data.num_computed_tokens
            if not req_data.resumed_from_preemption:
                # Append the new blocks to the existing block IDs.
                for block_ids, new_block_ids in zip(  # type: ignore[call-overload]
                        req_state.block_ids,
                        req_data.new_block_ids,
                        strict=True):
                    block_ids.extend(new_block_ids)
            else:
                # The request is resumed from preemption.
                # Replace the existing block IDs with the new ones.
                req_state.block_ids = req_data.new_block_ids

            req_index = self.input_batch.req_id_to_index.get(req_id)
            if req_index is None:
                # The request is not in the persistent batch.
                # The request was either preempted and resumed later, or was not
                # scheduled in the previous step and needs to be added again.
                req_ids_to_add.append(req_id)
                continue

            # Update the persistent batch.
            self.input_batch.num_computed_tokens_cpu[req_index] = (
                req_data.num_computed_tokens)
            self.input_batch.block_table.append_row(req_data.new_block_ids,
                                                    req_index)

        # Add the new or resumed requests to the persistent batch.
        # The smaller empty indices are filled first.
        removed_req_indices = sorted(removed_req_indices, reverse=True)
        for req_id in req_ids_to_add:
            req_state = self.requests[req_id]
            if removed_req_indices:
                # Fill the empty index.
                req_index = removed_req_indices.pop()
            else:
                # Append to the end.
                req_index = None
            self.input_batch.add_request(req_state, req_index)

        # Condense the batched states if there are empty indices.
        if removed_req_indices:
            self.input_batch.condense(removed_req_indices)

        return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0

    def get_model(self) -> nn.Module:
        return self.model

    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
        """
        Generates the KVCacheSpec by parsing the kv cache format from each
        Attention module in the static forward context.
        Returns:
            KVCacheSpec: A dictionary mapping layer names to their KV cache
            format. Layers that do not need KV cache are not included.
        """

        layers = get_layers_from_vllm_config(self.vllm_config, Attention)
        block_size = self.vllm_config.cache_config.block_size
        kv_cache_spec: dict[str, KVCacheSpec] = {}
        for layer_name, attn_module in layers.items():
            if (kv_tgt_layer :=
                    attn_module.kv_sharing_target_layer_name) is not None:
                # The layer doesn't need its own KV cache and will use that of
                # the target layer. We skip creating a KVCacheSpec for it, so
                # that KV cache management logic will act as this layer does
                # not exist, and doesn't allocate KV cache for the layer. This
                # enables the memory saving of cross-layer kv sharing, allowing
                # a given amount of memory to accommodate longer context lengths
                # or enable more requests to be processed simultaneously.
                self.shared_kv_cache_layers[layer_name] = kv_tgt_layer
                continue

            if attn_module.attn_type == AttentionType.DECODER:
                if attn_module.sliding_window is not None:
                    kv_cache_spec[layer_name] = SlidingWindowSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        sliding_window=attn_module.sliding_window,
                        use_mla=False,
                    )
                else:
                    kv_cache_spec[layer_name] = FullAttentionSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        use_mla=False,
                    )
            elif attn_module.attn_type in (AttentionType.ENCODER,
                                           AttentionType.ENCODER_ONLY):
                # encoder-only attention does not need KV cache.
                continue
            elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
                raise NotImplementedError
            else:
                raise ValueError(
                    f"Unknown attention type: {attn_module.attn_type}")

        return kv_cache_spec

    def _prepare_inputs(self, scheduler_output: "SchedulerOutput"):
        total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
        assert total_num_scheduled_tokens > 0
        num_reqs = self.input_batch.num_reqs
        assert num_reqs > 0

        # Get the number of scheduled tokens for each request.
        num_scheduled_tokens_per_req = []
        max_num_scheduled_tokens_all_reqs = 0
        for req_id in self.input_batch.req_ids[:num_reqs]:
            assert req_id is not None
            num_tokens = scheduler_output.num_scheduled_tokens[req_id]
            num_scheduled_tokens_per_req.append(num_tokens)
            max_num_scheduled_tokens_all_reqs = max(
                max_num_scheduled_tokens_all_reqs, num_tokens)
        num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req,
                                                dtype=np.int32)
        assert max_num_scheduled_tokens_all_reqs > 0

        # Get request indices.
        # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
        # For each scheduled token, what are the corresponding req index.
        req_indices = np.repeat(self.arange_np[:num_reqs],
                                num_scheduled_tokens_per_req)

        # Get batched arange.
        # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
        # For each scheduled token, what is its position in corresponding req.
        arange = np.concatenate(
            [self.arange_np[:n] for n in num_scheduled_tokens_per_req])

        # Get positions.
        positions_np = self.positions_np[:total_num_scheduled_tokens]
        np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
               arange,
               out=positions_np)

        # Get token indices.
        # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
        # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
        # where M is the max_model_len.
        token_indices = (positions_np +
                         req_indices * self.input_batch.token_ids_cpu.shape[1])

        # NOTE(woosuk): We use torch.index_select instead of np.take here
        # because torch.index_select is much faster than np.take for large
        # tensors.
        torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
                           0,
                           torch.from_numpy(token_indices),
                           out=self.input_ids_cpu[:total_num_scheduled_tokens])

        # Calculate the slot mapping.
        # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
        # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
        # where K is the max_num_blocks_per_req and the block size is 2.
        # NOTE(woosuk): We can't simply use `token_indices // block_size` here
        # because M (max_model_len) is not necessarily divisible by block_size.
        # req_indices: # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
        block_table_indices = (req_indices * self.max_num_blocks_per_req +
                               positions_np // self.block_size)
        # NOTE(woosuk): We use torch.index_select instead of np.take here
        # because torch.index_select is much faster than np.take for large
        # tensors.
        block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor()
        block_numbers = block_table_cpu.flatten()[block_table_indices].numpy()
        block_offsets = positions_np % self.block_size
        np.add(block_numbers * self.block_size,
               block_offsets,
               out=self.input_batch.block_table[0].
               slot_mapping_np[:total_num_scheduled_tokens])

        # Prepare the attention metadata.
        self.query_start_loc_np[0] = 0
        np.cumsum(num_scheduled_tokens_per_req,
                  out=self.query_start_loc_np[1:num_reqs + 1])
        self.query_start_loc_np[num_reqs + 1:] = 1

        self.seq_lens_np[:num_reqs] = (
            self.input_batch.num_computed_tokens_cpu[:num_reqs] +
            num_scheduled_tokens_per_req)

        # Do the padding and copy the tensors to the TPU.
        padded_total_num_scheduled_tokens = _get_padded_token_len(
            self.num_tokens_paddings, total_num_scheduled_tokens)
        # Zero out to avoid spurious values from prev iteration (last cp chunk)
        self.input_ids_cpu[
            total_num_scheduled_tokens:padded_total_num_scheduled_tokens] = 0
        self.input_ids = self.input_ids_cpu[
            :padded_total_num_scheduled_tokens].to(self.device)
        self.position_ids = self.positions_cpu[
            :padded_total_num_scheduled_tokens].to(self.device)
        self.input_batch.block_table[0].slot_mapping_cpu[
            total_num_scheduled_tokens:] = _PAD_SLOT_ID
        slot_mapping = (
            self.input_batch.block_table[0].
            slot_mapping_cpu[:padded_total_num_scheduled_tokens].to(
                self.device))
        block_tables = self.block_table_cpu[:self.max_num_reqs]
        block_tables[:num_reqs, :self.max_num_blocks_per_req] = (
            self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs])
        block_tables = block_tables.to(self.device)
        query_start_loc = self.query_start_loc_cpu[:self.max_num_reqs + 1].to(
            self.device)
        seq_lens = self.seq_lens_cpu[:self.max_num_reqs].to(self.device)

        if self.lora_config is not None:
            # We need to respect padding when activating LoRA adapters
            padded_num_scheduled_tokens_per_req = np.copy(
                num_scheduled_tokens_per_req
            )  # Copying to avoid accidental state corruption bugs
            padded_num_scheduled_tokens_per_req[-1] += \
                padded_total_num_scheduled_tokens - total_num_scheduled_tokens

            self.set_active_loras(self.input_batch,
                                  padded_num_scheduled_tokens_per_req)

        attn_metadata = PallasMetadata(
            slot_mapping=slot_mapping,
            block_tables=block_tables,
            context_lens=seq_lens,
            query_start_loc=query_start_loc,
            num_seqs=torch.tensor([num_reqs],
                                  dtype=torch.int32,
                                  device=self.device),
        )
        # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
        # request in the batch. While we should not sample any token from this
        # partial request, we do so for simplicity. We will ignore the sampled
        # token from the partial request.
        # TODO: Support prompt logprobs.
        padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
            num_reqs, self.max_num_reqs)
        # Indices at which we sample (positions of last token in the sequence).
        # Padded to avoid recompiling when `num_reqs` varies.
        logits_indices = self.query_start_loc_cpu[1:padded_num_reqs + 1] - 1
        logits_indices = logits_indices.to(self.device)

        if self.lora_config is not None:
            # We need to respect padding when activating LoRA adapters
            padded_num_scheduled_tokens_per_req = np.copy(
                num_scheduled_tokens_per_req
            )  # Copying to avoid accidental state corruption bugs
            padded_num_scheduled_tokens_per_req[-1] += \
                padded_total_num_scheduled_tokens - total_num_scheduled_tokens

            self.set_active_loras(self.input_batch,
                                  padded_num_scheduled_tokens_per_req)

        layer_names = get_layers_from_vllm_config(self.vllm_config,
                                                  Attention).keys()
        per_layer_attn_metadata = {
            layer_name: attn_metadata
            for layer_name in layer_names
        }
        return per_layer_attn_metadata, logits_indices, padded_num_reqs
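
    # Illustrative note on the bucketing above (editorial sketch, assuming
    # token paddings of [16, 32, 64, ...] and a request-padding floor of
    # MIN_NUM_SEQS): a batch of 3 requests scheduling [2, 5, 3] tokens has
    # total_num_scheduled_tokens == 10, so `input_ids`, `position_ids` and
    # the slot mapping are padded up to the 16-token bucket, while
    # `logits_indices` is padded to the MIN_NUM_SEQS request bucket, keeping
    # tensor shapes inside the set of precompiled XLA graphs.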

    def _scatter_placeholders(
        self,
        embeds: torch.Tensor,
        is_embed: Optional[torch.Tensor],
    ) -> torch.Tensor:
        if is_embed is None:
            return embeds

        placeholders = embeds.new_full(
            (is_embed.shape[0], embeds.shape[-1]),
            fill_value=torch.nan,
        )
        placeholders[is_embed] = embeds
        return placeholders

    def _gather_placeholders(
        self,
        placeholders: torch.Tensor,
        is_embed: Optional[torch.Tensor],
    ) -> torch.Tensor:
        if is_embed is None:
            return placeholders

        return placeholders[is_embed]

    def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
        scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
        if not scheduled_encoder_inputs:
            return

        # Batch the multi-modal inputs.
        mm_inputs = list[MultiModalKwargs]()
        req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
        for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
            req_state = self.requests[req_id]

            for mm_input_id in encoder_input_ids:
                mm_inputs.append(req_state.mm_inputs[mm_input_id])
                req_ids_pos.append(
                    (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))

        # Batch mm inputs as much as we can: if a request in the batch has
        # multiple modalities or a different modality than the previous one,
        # we process it separately to preserve item order.
        # FIXME(ywang96): This is a hacky way to deal with multiple modalities
        # in the same batch while still being able to benefit from batching
        # multimodal inputs. The proper solution should be reordering the
        # encoder outputs.
        grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)

        encoder_outputs = []
        for grouped_mm_inputs in grouped_mm_inputs_list:
            batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
            batched_mm_inputs = MultiModalKwargs.as_kwargs(
                batched_mm_inputs,
                device=self.device,
            )

            # Run the encoder.
            # `curr_group_outputs` is either of the following:
            # 1. A tensor of shape (num_items, feature_size, hidden_size)
            #    in case feature_size is fixed across all multimodal items.
            # 2. A list or tuple (length: num_items) of tensors, each of shape
            #    (feature_size, hidden_size) in case the feature size is
            #    dynamic depending on the input multimodal items.
            xm.mark_step()
            curr_group_outputs = self.model.get_multimodal_embeddings(
                **batched_mm_inputs)
            xm.mark_step()

            sanity_check_mm_encoder_outputs(
                curr_group_outputs,
                expected_num_items=len(grouped_mm_inputs),
            )

            if isinstance(curr_group_outputs, torch.Tensor):
                encoder_outputs.append(curr_group_outputs)
            else:
                assert isinstance(curr_group_outputs, (list, tuple))
                for output in curr_group_outputs:
                    encoder_outputs.append(output)

        # Cache the encoder outputs.
        # NOTE (NickLucche) here we diverge from logic in other runners, as we
        # assume to only have whole mm items to process. Hence we avoid the
        # intrinsic dynamism that `scatter_mm_placeholders` introduces.
        for (req_id, input_id, pos_info), output in zip(
                req_ids_pos,
                encoder_outputs,
        ):
            if req_id not in self.encoder_cache:
                self.encoder_cache[req_id] = {}
            assert pos_info.is_embed is None, "Expected all positions to be"\
                " contiguous and embeddings."
            self.encoder_cache[req_id][input_id] = output

    def _gather_mm_embeddings(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> list[torch.Tensor]:
        mm_embeds: list[torch.Tensor] = []
        for req_id in self.input_batch.req_ids:
            num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
                req_id]
            req_state = self.requests[req_id]
            num_computed_tokens = req_state.num_computed_tokens
            mm_positions = req_state.mm_positions
            # TODO unroll loop and assume/enforce --disable_chunked_mm_input
            # NOTE (NickLucche) here we diverge from logic in other runners, as
            # we assume to only have whole mm items to process. Hence we avoid
            # the intrinsic dynamism that `gather_mm_placeholders` introduces.
            for i, pos_info in enumerate(mm_positions):
                start_pos = pos_info.offset
                num_encoder_tokens = pos_info.length

                # The encoder output is needed if the two ranges overlap:
                # [num_computed_tokens,
                #  num_computed_tokens + num_scheduled_tokens) and
                # [start_pos, start_pos + num_encoder_tokens)
                if start_pos >= num_computed_tokens + num_scheduled_tokens:
                    # The encoder output is not needed in this step.
                    break
                if start_pos + num_encoder_tokens <= num_computed_tokens:
                    # The encoder output is already processed and stored
                    # in the decoder's KV cache.
                    continue

                assert req_id in self.encoder_cache
                assert i in self.encoder_cache[req_id]
                assert pos_info.is_embed is None, "Expected all positions to"\
                    " be contiguous and embeddings."
                encoder_output = self.encoder_cache[req_id][i]
                mm_embeds.append(encoder_output)
        return mm_embeds

    def _get_model_inputs(self, input_ids: torch.Tensor,
                          mm_embeds: list[torch.Tensor]):
        if self.is_multimodal_model:
            # NOTE(woosuk): To unify token ids and soft tokens (vision
            # embeddings), we always use embeddings (rather than token ids)
            # as input to the multimodal model, even when the input is text.
            if mm_embeds:
                inputs_embeds = self.model.get_input_embeddings(
                    input_ids, mm_embeds)
            else:
                inputs_embeds = self.model.get_input_embeddings(input_ids)
            return None, inputs_embeds
        else:
            # For text-only models, we use token ids as input.
            # While it is possible to use embeddings as input just like the
            # multimodal models, it is not desirable for performance since
            # then the embedding layer is not included in the CUDA graph.
            return input_ids, None

    @torch.no_grad()
    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
        intermediate_tensors: Optional[IntermediateTensors] = None,
    ) -> ModelRunnerOutput:
        # Update cached state
        self._update_states(scheduler_output)
        if not scheduler_output.total_num_scheduled_tokens:
            # Return empty ModelRunnerOutput if there's no work to do.
            return EMPTY_MODEL_RUNNER_OUTPUT

        if self.is_multimodal_model:
            # Run the multimodal encoder if any.
            self._execute_mm_encoder(scheduler_output)
            mm_embeds = self._gather_mm_embeddings(scheduler_output)
        else:
            mm_embeds = []
        xm.mark_step()
        # Prepare inputs
        attn_metadata, logits_indices, padded_num_reqs = self._prepare_inputs(
            scheduler_output)
        input_ids, inputs_embeds = self._get_model_inputs(
            self.input_ids, mm_embeds)
        xm.mark_step()
        num_reqs = self.input_batch.num_reqs
        # Run the decoder
        with set_forward_context(
                attn_metadata,
                self.vllm_config,
                num_tokens=scheduler_output.total_num_scheduled_tokens):
            hidden_states = self.model(
                input_ids=input_ids,
                positions=self.position_ids,
                inputs_embeds=inputs_embeds,
            )
        hidden_states = self.select_hidden_states(hidden_states,
                                                  logits_indices)
        logits = self.compute_logits(hidden_states)
        tpu_sampling_metadata = TPUSupportedSamplingMetadata.\
            from_input_batch(self.input_batch, padded_num_reqs, self.device)
        if scheduler_output.grammar_bitmask is not None:
            require_struct_decoding, grammar_bitmask_padded, arange = \
                self.prepare_structured_decoding_input(logits, scheduler_output)
            logits = self.structured_decode(require_struct_decoding,
                                            grammar_bitmask_padded, logits,
                                            arange)
        selected_token_ids = self.sample_from_logits_func(
            logits, tpu_sampling_metadata)
        # NOTE (NickLucche) Use the original logits (before any penalties or
        # temperature scaling) for the top-k logprobs. We can't enforce it due
        # to recompilations outside torch.compiled code, so just make sure
        # `sample_from_logits` does not modify the logits in-place.
        logprobs = self.gather_logprobs(logits, selected_token_ids) \
            if tpu_sampling_metadata.logprobs else None

        # Remove padding on cpu and keep dynamic op outside of xla graph.
        selected_token_ids = selected_token_ids.cpu()[:num_reqs]
        logprobs_lists = logprobs.tolists() \
            if tpu_sampling_metadata.logprobs else None

        # Update the cache state concurrently. Code above will not block until
        # we use `selected_token_ids`. Add mark_step if post-processing changes
        request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
        discard_sampled_tokens_req_indices = []
        for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
            assert req_id is not None
            req_state = self.requests[req_id]
            seq_len = (req_state.num_computed_tokens +
                       scheduler_output.num_scheduled_tokens[req_id])
            if seq_len >= req_state.num_tokens:
                request_seq_lens.append((i, req_state, seq_len))
            else:
                # Ignore the sampled token from the partial request.
                # Rewind the generator state as if the token was not sampled.
                generator = self.input_batch.generators.get(i)
                if generator is not None:
                    # This relies on cuda-specific torch-internal impl details
                    generator.set_offset(generator.get_offset() - 4)

                # Record the index of the request that should not be sampled,
                # so that we could clear the sampled tokens before returning.
                discard_sampled_tokens_req_indices.append(i)

        assert all(
            req_id is not None for req_id in
            self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
        req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])

        prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
        for req_id in self.input_batch.req_ids[:num_reqs]:
            prompt_logprobs_dict[req_id] = None

        max_gen_len = selected_token_ids.shape[-1]
        if max_gen_len == 1:
            valid_sampled_token_ids = selected_token_ids.tolist()

            # Mask out the sampled tokens that should not be sampled.
            # TODO: Keep in sync with gpu_model_runner.py, in particular
            # the "else" case here
            for i in discard_sampled_tokens_req_indices:
                valid_sampled_token_ids[i].clear()

            # Append sampled tokens
            for i, req_state, seq_len in request_seq_lens:
                token_id = valid_sampled_token_ids[i][0]
                self.input_batch.token_ids_cpu[i, seq_len] = token_id
                req_state.output_token_ids.append(token_id)
                self.input_batch.num_tokens[i] += 1

        else:
            valid_mask = selected_token_ids != INVALID_TOKEN_ID
            gen_lens = valid_mask.sum(dim=1).tolist()
            valid_sampled_token_ids = [
                seq.tolist()
                for seq in selected_token_ids[valid_mask].split(gen_lens)
            ]
            self.input_batch.num_tokens[:num_reqs] += gen_lens
            for i, req_state, seq_len in request_seq_lens:
                target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
                self.input_batch.token_ids_cpu[
                    i, target_slice] = valid_sampled_token_ids[i]
                req_state.output_token_ids.extend(valid_sampled_token_ids[i])

        model_runner_output = ModelRunnerOutput(
            req_ids=req_ids,
            req_id_to_index=self.input_batch.req_id_to_index,
            sampled_token_ids=valid_sampled_token_ids,
            spec_token_ids=None,
            logprobs=logprobs_lists,
            prompt_logprobs_dict=prompt_logprobs_dict,
        )

        # Check there are no new graphs compiled - all the graphs should be
        # captured and compiled during warm up.
        self._verify_num_xla_graphs("execute_model")

        return model_runner_output

    def load_model(self) -> None:
        self.device = self.device_config.device

        # NOTE(woosuk): While the executor assigns the TP ranks to the worker
        # process, the ranks can be different from the ranks internally assigned
        # by the xm runtime. Therefore, there is a mismatch in the rank
        # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
        # This is not a problem in linear layers because all-reduce is
        # rank-agnostic. However, it matters for all-gather as the ranks
        # determine the order of concatenating the output tensors.
        # As a workaround, we use the xm's rank assignment only when loading
        # the embedding weights.
        xm_tp_rank = xr.global_ordinal()
        with patch(
                "vllm.model_executor.layers.vocab_parallel_embedding."
                "get_tensor_model_parallel_rank",
                return_value=xm_tp_rank):
            if self.use_spmd:
                tpu_loader = TPUModelLoader(
                    load_config=self.vllm_config.load_config)
                model = tpu_loader.load_model(
                    vllm_config=self.vllm_config,
                    model_config=self.vllm_config.model_config,
                    mesh=self.mesh)
            else:
                # model = get_model(vllm_config=self.vllm_config)
                model_loader = get_model_loader(self.load_config)
                if not hasattr(self, "model"):
                    logger.info("Loading model from scratch...")
                    model = model_loader.load_model(
                        vllm_config=self.vllm_config,
                        model_config=self.model_config)
                else:
                    logger.info("Model was already initialized. \
                        Loading weights inplace...")
                    model_loader.load_weights(self.model,
                                              model_config=self.model_config)
            if self.lora_config is not None:
                model = self.load_lora_model(model, self.model_config,
                                             self.scheduler_config,
                                             self.lora_config, self.device)
                replace_set_lora(model)

        # Sync all pending XLA execution during model initialization and weight
        # loading.
        xm.mark_step()
        xm.wait_device_ops()
        if not hasattr(self, "model"):
            self.model = model
        self.sampler = TPUSampler()

    @torch.no_grad()
    def _dummy_run(self, num_tokens: int) -> None:
        if self.is_multimodal_model:
            input_ids = None
            inputs_embeds = torch.zeros((num_tokens, self.hidden_size),
                                        dtype=self.dtype,
                                        device=self.device)
        else:
            input_ids = torch.zeros((num_tokens),
                                    dtype=torch.int32).to(self.device)
            inputs_embeds = None
        actual_num_reqs = min(num_tokens, self.max_num_reqs)
        position_ids = torch.zeros(num_tokens,
                                   dtype=torch.int32).to(self.device)
        slot_mapping = torch.zeros(num_tokens,
                                   dtype=torch.int64).to(self.device)
        block_tables = torch.zeros(
            (self.max_num_reqs, self.block_table_cpu.shape[1]),
            dtype=torch.int32).to(self.device)
        query_lens = [1] * self.max_num_reqs
        query_start_loc = torch.cumsum(torch.tensor([0] + query_lens,
                                                    dtype=torch.int32),
                                       dim=0,
                                       dtype=torch.int32).to(self.device)
        context_lens = torch.ones((self.max_num_reqs, ),
                                  dtype=torch.int32).to(self.device)
        num_seqs = torch.tensor([actual_num_reqs],
                                dtype=torch.int32).to(self.device)
        attn_metadata = PallasMetadata(
            slot_mapping=slot_mapping,
            block_tables=block_tables,
            context_lens=context_lens,
            query_start_loc=query_start_loc,
            num_seqs=num_seqs,
        )

        if self.is_multimodal_model:
            torch._dynamo.mark_dynamic(inputs_embeds, 0)
        else:
            torch._dynamo.mark_dynamic(input_ids, 0)
        torch._dynamo.mark_dynamic(position_ids, 0)
        torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)

        layer_names = get_layers_from_vllm_config(self.vllm_config,
                                                  Attention).keys()
        per_layer_attn_metadata = {
            layer_name: attn_metadata
            for layer_name in layer_names
        }

        with self.maybe_select_dummy_loras(
                self.lora_config,
                np.array([num_tokens], dtype=np.int32)), set_forward_context(
                    per_layer_attn_metadata, self.vllm_config, 0):
            out = self.model(input_ids=input_ids,
                             positions=position_ids,
                             inputs_embeds=inputs_embeds)
        self._hidden_states_dtype = out.dtype

    def _set_active_loras(self, prompt_lora_mapping, token_lora_mapping,
                          lora_requests) -> None:
        xm.mark_step()  # Captures input updates
        super()._set_active_loras(prompt_lora_mapping, token_lora_mapping,
                                  lora_requests)
        xm.mark_step()  # Captures metadata updates

    def _precompile_mm_encoder(self) -> None:
        # Pre-compile MM encoder for all supported data modalities.
        hf_config = self.vllm_config.model_config.hf_config
        for mode, max_items_by_mode in \
                self.max_num_mm_items_by_modality.items():
            logger.info(
                "Compiling Multimodal %s Encoder with different input"
                " shapes.", mode)
            start = time.perf_counter()
            # No padding for MM encoder just yet.
            for num_items in range(1, max_items_by_mode + 1):
                logger.info(" -- mode: %s items: %d", mode, num_items)
                batched_dummy_mm_inputs = self._get_mm_dummy_batch(
                    mode, num_items)
                # Run multimodal encoder.
                xm.mark_step()
                mm_embeds = self.model.\
                    get_multimodal_embeddings(**batched_dummy_mm_inputs)
                xm.mark_step()
                num_patches = mm_embeds[0].shape[0]
                items_size = num_patches * num_items

                # NOTE (NickLucche) pre-compile `get_input_embeddings` when mm
                # embeddings are present. We assume `--disable-mm-chunked`,
                # hence only whole items can be scheduled. This implies we just
                # need to compile when `num_items` fit the (padded) `input_ids`
                for num_tokens in self.num_tokens_paddings:
                    if num_tokens >= items_size:
                        # XLA Workaround: if torch.zeros(..device) is used, XLA
                        # compiles a scalar+expansion op, which won't match
                        # the graph generated at runtime. CPU->TPU must be used
                        placeholders_ids = torch.zeros(num_tokens,
                                                       dtype=torch.int32,
                                                       device="cpu")
                        # Align placeholders and actual num mm_embeddings.
                        placeholders_ids[:items_size] = \
                            hf_config.image_token_index

                        placeholders_ids = placeholders_ids.to(self.device)
                        # Assign outputs or the graph will be cut short.
                        a, b = self._get_model_inputs(placeholders_ids,
                                                      [mm_embeds])
                        assert a is None
                        xm.mark_step()

            # Pre-compile `get_input_embeddings` when mm_embeddings are not
            # present. Chunk is only made of text, no mm_placeholders.
            for num_tokens in self.num_tokens_paddings:
                placeholders_ids = torch.zeros(num_tokens,
                                               dtype=torch.int32,
                                               device="cpu")
                placeholders_ids = placeholders_ids.to(self.device)
                a, b = self._get_model_inputs(placeholders_ids, [])
                assert a is None
                xm.mark_step()

            xm.wait_device_ops()
            end = time.perf_counter()
            logger.info(
                "Multimodal %s Encoder compilation finished in %.2f "
                "[secs].", mode, end - start)

    def _precompile_backbone(self) -> None:
        logger.info("Compiling the model with different input shapes.")
        start = time.perf_counter()
        for num_tokens in self.num_tokens_paddings:
            logger.info(" -- num_tokens: %d", num_tokens)
            self._dummy_run(num_tokens)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("model backbone")

    def _precompile_select_hidden_states(self) -> None:
        # Compile hidden state selection function for bucketed
        # n_tokens x max_num_reqs. Graph is really small so this is fine.
        logger.info(
            "Compiling select_hidden_states with different input shapes.")
        start = time.perf_counter()
        hsize = self.model_config.get_hidden_size()
        for num_tokens in self.num_tokens_paddings:
            dummy_hidden = torch.zeros((num_tokens, hsize),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            torch._dynamo.mark_dynamic(dummy_hidden, 0)
            for num_reqs in self.num_reqs_paddings:
                indices = torch.zeros(num_reqs,
                                      dtype=torch.int32,
                                      device=self.device)
                torch._dynamo.mark_dynamic(indices, 0)
                self.select_hidden_states(dummy_hidden, indices)
                logger.info(" -- num_tokens: %d, num_seqs: %d", num_tokens,
                            num_reqs)
                # Requests can't be more than tokens. But do compile for the
                # next bigger value in case num_tokens uses bucketed padding.
                if num_reqs >= min(num_tokens, self.max_num_reqs):
                    break
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("select_hidden_states")

    def _precompile_compute_logits(self) -> None:
        logger.info("Compiling compute_logits with different input shapes.")
        start = time.perf_counter()
        hsize = self.model_config.get_hidden_size()
        for num_reqs in self.num_reqs_paddings:
            dummy_hidden = torch.zeros((num_reqs, hsize),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            torch._dynamo.mark_dynamic(dummy_hidden, 0)
            self.compute_logits(dummy_hidden)
            logger.info(" -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("compute_logits")

    def _precompile_structured_decoding(self) -> None:
        logger.info(
            "Compiling structured_decoding with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            dummy_require_struct_decoding = \
                self.require_structured_out_cpu[:num_reqs].to(self.device)
            dummy_grammar_bitmask = \
                self.grammar_bitmask_cpu[:num_reqs].to(self.device)
            # The first dimension of the above 3 dummy tensors cannot be
            # mark_dynamic because some operations in structured_decode require
            # them to be static.
            arange = self.structured_decode_arange.to(self.device)
            self.structured_decode(dummy_require_struct_decoding,
                                   dummy_grammar_bitmask, dummy_logits, arange)
            logger.info(" -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("structured_decoding")

    def _precompile_sample_from_logits(self) -> None:
        logger.info(
            "Compiling sample_from_logits with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            # The first dimension of dummy_logits cannot be mark_dynamic
            # because some operations in the sampler require it to be static.
            for all_greedy in [False, True]:
                generate_params_if_all_greedy = not all_greedy
                sampling_metadata = (
                    TPUSupportedSamplingMetadata.from_input_batch(
                        self.input_batch,
                        num_reqs,
                        self.device,
                        generate_params_if_all_greedy,
                    ))
                sampling_metadata.all_greedy = all_greedy
                with self.maybe_select_dummy_loras(
                        self.lora_config, np.array([num_reqs],
                                                   dtype=np.int32)):
                    self.sample_from_logits_func(dummy_logits,
                                                 sampling_metadata)
            logger.info(" -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("sample_from_logits")

    def _precompile_gather_logprobs(self) -> None:
        logger.info("Compiling gather_logprobs with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            dummy_tokens = torch.zeros((num_reqs, 1),
                                       dtype=torch.int64).to(self.device)
            with self.maybe_select_dummy_loras(
                    self.lora_config, np.array([num_reqs], dtype=np.int32)):
                self.gather_logprobs(dummy_logits, dummy_tokens)
            logger.info(" -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("gather_logprobs")

    def capture_model(self) -> None:
        """
        Precompile all the subgraphs with possible input shapes.
        """
        with self.maybe_setup_dummy_loras(self.lora_config):
            self._precompile_mm_encoder()
            self._precompile_backbone()
            self._precompile_select_hidden_states()
            self._precompile_compute_logits()
            self._precompile_structured_decoding()
            self._precompile_sample_from_logits()
            self._precompile_gather_logprobs()

    def profile_run(
        self,
        num_tokens: int,
    ) -> None:
        # Profile with multimodal encoder & encoder cache.
        # TODO: handle encoder-decoder models once we support them.
        if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
                and self.encoder_cache_size > 0):

            # NOTE: Currently model is profiled with a single non-text
            # modality with the max possible input tokens even when
            # it supports multiple.
            dummy_data_modality, max_num_mm_items = max(
                self.max_num_mm_items_by_modality.items(), key=lambda t: t[1])

            encoder_budget = min(self.max_num_encoder_input_tokens,
                                 self.encoder_cache_size)

            logger.info(
                "Encoder cache will be initialized with a budget of %d tokens,"
                " and profiled with %s %s items of the maximum feature size.",
                encoder_budget, max_num_mm_items, dummy_data_modality)

            # Create dummy batch of multimodal inputs.
            batched_dummy_mm_inputs = self._get_mm_dummy_batch(
                dummy_data_modality, max_num_mm_items)

            # Run multimodal encoder.
            # Isolate encoder graph from post-processing to minimize
            # impact of recompilation until it's fixed.
            start = time.perf_counter()
            xm.mark_step()
            dummy_encoder_outputs = self.model.get_multimodal_embeddings(
                **batched_dummy_mm_inputs)
            xm.mark_step()
            xm.wait_device_ops()
            end = time.perf_counter()
            logger.info(
                "Multimodal Encoder profiling finished in %.2f [secs].",
                end - start)

            assert len(dummy_encoder_outputs) == max_num_mm_items, (
                "Expected dimension 0 of encoder outputs to match the number "
                f"of multimodal data items: {max_num_mm_items}, got "
                f"{len(dummy_encoder_outputs)=} instead. This is most likely "
                "due to the 'get_multimodal_embeddings' method of the model "
                "not implemented correctly.")

            # Cache the dummy encoder outputs.
            self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))

        # Trigger compilation for general shape.
        self._dummy_run(num_tokens)

        xm.mark_step()
        xm.wait_device_ops()
        self.encoder_cache.clear()
        gc.collect()

    def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
        """
        Initialize KV cache based on `kv_cache_config`.
        Args:
            kv_cache_config: Configuration for the KV cache, including the KV
            cache size of each layer
        """
        if len(kv_cache_config.kv_cache_groups) > 1:
            raise NotImplementedError(
                "Hybrid models with more than one KV cache type are not "
                "supported yet.")

        if kv_cache_config.kv_cache_groups[
                0].kv_cache_spec.block_size != self.block_size:
            self.input_batch = InputBatch(
                max_num_reqs=self.max_num_reqs,
                max_model_len=self.max_model_len,
                max_num_batched_tokens=self.max_num_tokens,
                device=self.device,
                pin_memory=self.pin_memory,
                vocab_size=self.model_config.get_vocab_size(),
                block_sizes=[
                    kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
                ],
            )
        # Verify dtype compatibility between block_table_cpu and input_batch
        assert self.block_table_cpu.dtype == self.input_batch.block_table[
            0].get_cpu_tensor().dtype

        kv_cache_sizes = {}
        for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
            assert len(kv_cache_tensor.shared_by) == 1, (
                "KV cache tensor shared by multiple layers is not supported "
                "in TPU.")
            kv_cache_sizes[kv_cache_tensor.shared_by[0]] = kv_cache_tensor.size

        kv_caches: dict[str, torch.Tensor] = {}
        for kv_cache_group in kv_cache_config.kv_cache_groups:
            kv_cache_spec = kv_cache_group.kv_cache_spec
            for layer_name in kv_cache_group.layer_names:
                tensor_size = kv_cache_sizes[layer_name]
                assert tensor_size % kv_cache_spec.page_size_bytes == 0
                num_blocks = tensor_size // kv_cache_spec.page_size_bytes  # noqa
                if isinstance(kv_cache_spec, AttentionSpec):
                    if self.use_spmd:
                        num_kv_heads = kv_cache_spec.num_kv_heads
                        assert self.original_parallel_config is not None
                        tp_size = \
                            self.original_parallel_config.tensor_parallel_size
                        # TODO: Handle kv cache duplication under SPMD mode.
                        assert num_kv_heads % tp_size == 0, (
                            f"num_kv_heads {num_kv_heads} must be divisible by "
                            f"tp_size {tp_size} under SPMD mode")
                    kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
                        num_blocks, kv_cache_spec.block_size,
                        kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
                    dtype = kv_cache_spec.dtype

                    tpu_kv_cache = torch.zeros(kv_cache_shape,
                                               dtype=dtype).to(self.device)

                    kv_caches[layer_name] = tpu_kv_cache
                else:
                    raise NotImplementedError

        # Setup `kv_cache_config` and `kv_caches` for models
        # with cross-layer KV sharing
        if self.shared_kv_cache_layers:
            initialize_kv_cache_for_kv_sharing(
                self.shared_kv_cache_layers,
                kv_cache_config.kv_cache_groups,
                kv_caches,
            )

        bind_kv_cache(
            kv_caches,
            self.vllm_config.compilation_config.static_forward_context,
            self.kv_caches)

        if self.use_spmd:
            # Shard KV Cache
            for cache in self.kv_caches:
                xs.mark_sharding(cache, self.mesh, (None, 'x', None, None))

    def reset_dynamo_cache(self):
        if self.is_multimodal_model:
            compiled_model = self.model.get_language_model().model
        else:
            compiled_model = self.model.model
        if isinstance(compiled_model, TorchCompileWrapperWithCustomDispatcher):
            logger.info("Clear dynamo cache and cached dynamo bytecode.")
            torch._dynamo.eval_frame.remove_from_cache(
                compiled_model.original_code_object)
            compiled_model.compiled_codes.clear()

    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def select_hidden_states(self, hidden_states, indices_do_sample):
        return hidden_states[indices_do_sample]

    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def compute_logits(self,
                       sample_hidden_states: torch.Tensor) -> torch.Tensor:
        return self.model.compute_logits(sample_hidden_states, None)

    # TODO: Under SPMD mode, sample_from_logits has correctness issue.
    # Re-enable the torch.compile once the issue is fixed in torchxla.
    # @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def sample_from_logits(
            self, logits: torch.Tensor,
            sampling_metadata: TPUSupportedSamplingMetadata) -> torch.Tensor:
        """
        Sample with xla-friendly function. This function is to be traced
        separately from `forward` for lighter compilation overhead.
        """
        if sampling_metadata.all_greedy:
            out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
        else:
            out_tokens = self.sampler(logits,
                                      sampling_metadata).sampled_token_ids
        return out_tokens

    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def gather_logprobs(self, logits: torch.Tensor,
                        sampled_tokens: torch.Tensor) -> LogprobsTensors:
        """
        Gather the top_logprobs with corresponding tokens. Use a fixed number
        of logprobs as an alternative to having multiple pre-compiled graphs.
        Select the number of logprobs actually demanded by each request on CPU.
        """
        logprobs = self.sampler.compute_logprobs(logits)
        return self.sampler.gather_logprobs(
            logprobs,
            self.model_config.max_logprobs,
            token_ids=sampled_tokens.squeeze(-1))

    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def structured_decode(self, require_struct_decoding: torch.Tensor,
                          grammar_bitmask: torch.Tensor, logits: torch.Tensor,
                          arange: torch.Tensor) -> torch.Tensor:
        return torch.where(
            require_struct_decoding,
            self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
            logits)

    def apply_grammar_bitmask(self, logits: torch.Tensor,
                              grammar_bitmask: torch.Tensor,
                              arange: torch.Tensor):
        assert (logits.shape[0] == grammar_bitmask.shape[0])
        logits_cloned = logits.clone()
        for i in range(logits.shape[0]):
            unpacked_bitmask = (torch.bitwise_right_shift(
                grammar_bitmask[i][:, None], arange[None, :]) & 1) == 0
            unpacked_bitmask = unpacked_bitmask.reshape(-1)[:self.vocab_size]
            logits_cloned[i] = logits_cloned[i].masked_fill(
                unpacked_bitmask, -float("inf"))
        return logits_cloned
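
    # Illustrative note on the unpacking above (assuming `arange` holds the
    # bit positions 0..31 of each packed 32-bit word): for a word with value
    # 0b0101, right-shifting by [0, 1, 2, 3] and masking with 1 gives
    # [1, 0, 1, 0], so the tokens whose bit is 0 are filled with -inf and can
    # never be sampled; a set bit marks a token the grammar still allows.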

    def get_multimodal_embeddings(self, *args, **kwargs):
        return self.model.get_multimodal_embeddings(*args, **kwargs)

    def get_input_embeddings(self, *args, **kwargs):
        return self.model.get_input_embeddings(*args, **kwargs)

    def prepare_structured_decoding_input(
            self, logits: torch.Tensor, scheduler_output: "SchedulerOutput"
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        grammar_bitmask = scheduler_output.grammar_bitmask
        assert grammar_bitmask is not None
        num_reqs, _ = logits.shape

        # Reset pre-allocated tensors
        self.grammar_bitmask_cpu.zero_()
        self.require_structured_out_cpu.zero_()

        # We receive the structured output bitmask from the scheduler, but the
        # indices of the requests in the batch may not match the indices of
        # the bitmask since the scheduler doesn't know how the tpu runner is
        # ordering the requests in the batch. We need to match the order of
        # bitmask with the order of requests
        struct_out_indices: list[int] = []
        mask_indices: list[int] = []
        for req_id in self.input_batch.req_ids:
            mask_index = scheduler_output.structured_output_request_ids.get(
                req_id)
            if mask_index is None:
                continue
            batch_index = self.input_batch.req_id_to_index[req_id]
            struct_out_indices.append(batch_index)
            mask_indices.append(mask_index)
        self.grammar_bitmask_cpu[struct_out_indices] = torch.from_numpy(
            grammar_bitmask[mask_indices])
        # It's not guaranteed that all requests in this batch require
        # structured output, so create a bool tensor to represent
        # the requests that need structured output.
        struct_out_indices = torch.tensor(struct_out_indices, dtype=torch.long)
        self.require_structured_out_cpu[struct_out_indices] = True
        return self.require_structured_out_cpu[:num_reqs].to(logits.device), \
            self.grammar_bitmask_cpu[:num_reqs].to(logits.device), \
            self.structured_decode_arange.to(logits.device)

    def _get_mm_dummy_batch(self, modality: str,
                            batch_size: int) -> BatchedTensorInputs:
        # Dummy data for pre-compiling multimodal models.
        dummy_request_data = self.mm_registry.get_decoder_dummy_data(
            model_config=self.model_config,
            seq_len=self.max_num_tokens,
        )
        dummy_mm_data = dummy_request_data.multi_modal_data

        # Dummy data definition in V0 may contain multiple multimodal items
        # (e.g, multiple images) for a single request, therefore here we
        # always replicate first item by max_num_mm_items times since in V1
        # they are scheduled to be processed separately.
        assert isinstance(dummy_mm_data, MultiModalKwargs), (
            "Expected dummy multimodal data to be of type "
            f"MultiModalKwargs, got {type(dummy_mm_data)=} instead. "
            "This is most likely due to the model not having a merged "
            "processor.")

        # When models have a merged processor, their dummy data is
        # already batched `MultiModalKwargs`, therefore we take the first
        # `MultiModalKwargsItem` from the desired modality to profile on.
        dummy_mm_item = dummy_mm_data.get_item(modality=modality, item_index=0)
        dummy_mm_kwargs = MultiModalKwargs.from_items([dummy_mm_item])

        batched_dummy_mm_inputs = MultiModalKwargs.batch([dummy_mm_kwargs] *
                                                         batch_size)
        return MultiModalKwargs.as_kwargs(
            batched_dummy_mm_inputs,
            device=self.device,
        )


def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
    logger.info("Preparing request paddings:")
    # assert min_req_size is power of 2
    assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
    paddings: list = []
    num = max(MIN_NUM_SEQS, min_req_size)
    while num <= max_req_size and (len(paddings) == 0 or paddings[-1] != num):
        paddings.append(num)
        logger.info(" %d", num)
        num = _get_padded_num_reqs_with_upper_limit(num + 1, max_req_size)
    return paddings
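
# Example (illustrative, assuming MIN_NUM_SEQS == 8): with min_req_size=8 and
# max_req_size=20, the loop yields [8, 16, 20] -- powers of two up to the
# limit, with the upper limit itself kept as the final bucket.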


def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
    res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
    return min(res, upper_limit)
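
# Example (illustrative, assuming MIN_NUM_SEQS == 8): x=5 pads to 8, x=9 pads
# to 16, and x=33 pads to 64; the result is always clamped to `upper_limit`.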


def _get_token_paddings(min_token_size: int, max_token_size: int,
                        padding_gap: int) -> list[int]:
    """Generate a list of padding size, starting from min_token_size,
    ending with a number that can cover max_token_size

    If padding_gap == 0 then:
        increase 2X each time (exponential)
    else:
        first increase the size to twice,
        then increase the padding size by padding_gap.
    """
    # assert min_token_size is power of 2
    assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
    paddings = []
    num = min_token_size

    if padding_gap == 0:
        logger.info("Using exponential token paddings:")
        while True:
            logger.info(" %d", num)
            paddings.append(num)
            if num >= max_token_size:
                break
            num *= 2
    else:
        logger.info("Using incremental token paddings:")
        while num <= padding_gap:
            logger.info(" %d", num)
            paddings.append(num)
            num *= 2
        num //= 2
        while num < max_token_size:
            num += padding_gap
            logger.info(" %d", num)
            paddings.append(num)

    return paddings
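
# Example (illustrative): _get_token_paddings(16, 300, 0) produces
# [16, 32, 64, 128, 256, 512], doubling until a bucket covers 300 tokens;
# _get_token_paddings(16, 300, 64) produces [16, 32, 64, 128, 192, 256, 320],
# doubling up to the gap and then stepping by 64.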


def _get_padded_token_len(paddings: list[int], x: int) -> int:
    """Return the first element in paddings list greater or equal to x.
    """
    index = bisect.bisect_left(paddings, x)
    assert index < len(paddings)
    return paddings[index]
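
# Example (illustrative): with paddings [16, 32, 64, 128], a batch of 40
# scheduled tokens is padded up to 64; a value larger than the biggest
# bucket would trip the assert.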


def replace_set_lora(model):

    def _tpu_set_lora(
            self,
            index: int,
            lora_a: torch.Tensor,
            lora_b: torch.Tensor,
            embeddings_tensor: Optional[torch.Tensor],
            bias: Optional[torch.Tensor] = None,
    ):
        # TODO: The integer index leads to a recompilation, but converting it
        # to a tensor doesn't seem to work anymore. This might be fixed with a
        # later release of torch_xla.
        self._original_set_lora(index, lora_a, lora_b, embeddings_tensor, bias)
        xm.mark_step()

    def _tpu_reset_lora(self, index: int):
        self._original_reset_lora(index)
        xm.mark_step()

    for _, module in model.named_modules():
        if isinstance(module, BaseLayerWithLoRA):
            module._original_set_lora = module.set_lora
            module._original_reset_lora = module.reset_lora
            module.set_lora = _tpu_set_lora.__get__(module, module.__class__)
            module.reset_lora = _tpu_reset_lora.__get__(
                module, module.__class__)