vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
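For readers who want to reproduce a content listing like the one below from the wheel itself, here is a minimal sketch using Python's standard `zipfile` module (a wheel is a zip archive). The local path is an assumption, and the sizes printed are file sizes in bytes, not the diff line counts shown in this listing.

```python
from zipfile import ZipFile

# Assumed local path to the downloaded wheel from this diff's header.
WHEEL = "vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl"

with ZipFile(WHEEL) as wf:
    for info in wf.infolist():
        # Each archive entry corresponds to one line of the listing below.
        print(f"{info.filename}  ({info.file_size} bytes)")
```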
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +53 -0
- vllm/_custom_ops.py +1828 -0
- vllm/_ipex_ops.py +244 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +115 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +308 -0
- vllm/attention/backends/blocksparse_attn.py +461 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
- vllm/attention/backends/flash_attn.py +1003 -0
- vllm/attention/backends/flashinfer.py +1104 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +313 -0
- vllm/attention/backends/ipex_attn.py +398 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1385 -0
- vllm/attention/backends/pallas.py +351 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +975 -0
- vllm/attention/backends/torch_sdpa.py +703 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +802 -0
- vllm/attention/layer.py +468 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +906 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +979 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +334 -0
- vllm/attention/selector.py +187 -0
- vllm/attention/utils/fa_utils.py +55 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1185 -0
- vllm/benchmarks/endpoint_request_func.py +381 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1135 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +563 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +544 -0
- vllm/compilation/counter.py +38 -0
- vllm/compilation/cuda_piecewise_backend.py +214 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +618 -0
- vllm/compilation/fx_utils.py +62 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +137 -0
- vllm/compilation/pass_manager.py +78 -0
- vllm/compilation/sequence_parallelism.py +268 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +67 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4746 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +521 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +135 -0
- vllm/core/placeholder_block_space_manager.py +100 -0
- vllm/core/scheduler.py +2093 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +176 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1296 -0
- vllm/distributed/tpu_distributed_utils.py +177 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1708 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2097 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +317 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1299 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +39 -0
- vllm/entrypoints/cli/benchmark/latency.py +30 -0
- vllm/entrypoints/cli/benchmark/main.py +54 -0
- vllm/entrypoints/cli/benchmark/serve.py +30 -0
- vllm/entrypoints/cli/benchmark/throughput.py +30 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +65 -0
- vllm/entrypoints/cli/openai.py +205 -0
- vllm/entrypoints/cli/run_batch.py +62 -0
- vllm/entrypoints/cli/serve.py +328 -0
- vllm/entrypoints/cli/types.py +25 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1544 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1387 -0
- vllm/entrypoints/openai/cli_args.py +315 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +1913 -0
- vllm/entrypoints/openai/run_batch.py +463 -0
- vllm/entrypoints/openai/serving_chat.py +1221 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +592 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +986 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +232 -0
- vllm/entrypoints/openai/serving_score.py +433 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +424 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +944 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +179 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +909 -0
- vllm/inputs/registry.py +237 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +85 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +259 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +152 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +369 -0
- vllm/model_executor/layers/fused_moe/__init__.py +54 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
- vllm/model_executor/layers/fused_moe/layer.py +1535 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/utils.py +98 -0
- vllm/model_executor/layers/layernorm.py +288 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1524 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +350 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +194 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +151 -0
- vllm/model_executor/layers/quantization/bitblas.py +461 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +196 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +906 -0
- vllm/model_executor/layers/quantization/gguf.py +565 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +261 -0
- vllm/model_executor/layers/quantization/modelopt.py +737 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +441 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +161 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +1862 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +95 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +76 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +600 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
- vllm/model_executor/model_loader/tpu.py +112 -0
- vllm/model_executor/model_loader/utils.py +302 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +28 -0
- vllm/model_executor/models/adapters.py +248 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +657 -0
- vllm/model_executor/models/aya_vision.py +466 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +543 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +523 -0
- vllm/model_executor/models/bert_with_rope.py +769 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +718 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1136 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +472 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +269 -0
- vllm/model_executor/models/deepseek_v2.py +843 -0
- vllm/model_executor/models/deepseek_vl2.py +648 -0
- vllm/model_executor/models/eagle.py +260 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/florence2.py +1103 -0
- vllm/model_executor/models/fuyu.py +389 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +533 -0
- vllm/model_executor/models/gemma3_mm.py +709 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4v.py +648 -0
- vllm/model_executor/models/gpt2.py +328 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +779 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +586 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +546 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +776 -0
- vllm/model_executor/models/interfaces.py +572 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1418 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +532 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +866 -0
- vllm/model_executor/models/llava_next.py +586 -0
- vllm/model_executor/models/llava_next_video.py +471 -0
- vllm/model_executor/models/llava_onevision.py +956 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +308 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +759 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +364 -0
- vllm/model_executor/models/mistral3.py +604 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1624 -0
- vllm/model_executor/models/mllama4.py +938 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +331 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1568 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +573 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +567 -0
- vllm/model_executor/models/paligemma.py +398 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +723 -0
- vllm/model_executor/models/phi4mm.py +1246 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +665 -0
- vllm/model_executor/models/pixtral.py +1316 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +497 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
- vllm/model_executor/models/qwen2_5_vl.py +1166 -0
- vllm/model_executor/models/qwen2_audio.py +410 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +132 -0
- vllm/model_executor/models/qwen2_vl.py +1405 -0
- vllm/model_executor/models/qwen3.py +321 -0
- vllm/model_executor/models/qwen3_moe.py +535 -0
- vllm/model_executor/models/qwen_vl.py +785 -0
- vllm/model_executor/models/registry.py +622 -0
- vllm/model_executor/models/roberta.py +276 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +643 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +508 -0
- vllm/model_executor/models/ultravox.py +656 -0
- vllm/model_executor/models/utils.py +731 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +747 -0
- vllm/model_executor/models/zamba2.py +1009 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +77 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +106 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +118 -0
- vllm/multimodal/image.py +97 -0
- vllm/multimodal/inputs.py +876 -0
- vllm/multimodal/parse.py +461 -0
- vllm/multimodal/processing.py +1895 -0
- vllm/multimodal/profiling.py +258 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +436 -0
- vllm/multimodal/video.py +198 -0
- vllm/outputs.py +512 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +266 -0
- vllm/platforms/cuda.py +526 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +538 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +435 -0
- vllm/platforms/tpu.py +216 -0
- vllm/platforms/xpu.py +156 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +54 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +887 -0
- vllm/transformers_utils/configs/__init__.py +61 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/h2ovl.py +16 -0
- vllm/transformers_utils/configs/internvl.py +54 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +258 -0
- vllm/transformers_utils/configs/nvlm_d.py +15 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +50 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +256 -0
- vllm/utils.py +2910 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +163 -0
- vllm/v1/attention/backends/flash_attn.py +869 -0
- vllm/v1/attention/backends/flashinfer.py +651 -0
- vllm/v1/attention/backends/flex_attention.py +477 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +931 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
- vllm/v1/attention/backends/mla/flashmla.py +152 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
- vllm/v1/attention/backends/mla/triton_mla.py +120 -0
- vllm/v1/attention/backends/pallas.py +240 -0
- vllm/v1/attention/backends/triton_attn.py +285 -0
- vllm/v1/attention/backends/utils.py +52 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +150 -0
- vllm/v1/core/kv_cache_coordinator.py +363 -0
- vllm/v1/core/kv_cache_manager.py +392 -0
- vllm/v1/core/kv_cache_utils.py +996 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +154 -0
- vllm/v1/core/sched/scheduler.py +1044 -0
- vllm/v1/core/sched/utils.py +23 -0
- vllm/v1/core/single_type_kv_cache_manager.py +403 -0
- vllm/v1/engine/__init__.py +173 -0
- vllm/v1/engine/async_llm.py +558 -0
- vllm/v1/engine/coordinator.py +253 -0
- vllm/v1/engine/core.py +961 -0
- vllm/v1/engine/core_client.py +1129 -0
- vllm/v1/engine/detokenizer.py +261 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +317 -0
- vllm/v1/engine/logprobs.py +199 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +428 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +407 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +537 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +194 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +523 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +239 -0
- vllm/v1/outputs.py +116 -0
- vllm/v1/request.py +193 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +59 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +286 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +432 -0
- vllm/v1/spec_decode/medusa.py +62 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +46 -0
- vllm/v1/structured_output/__init__.py +222 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +743 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +86 -0
- vllm/v1/worker/cpu_worker.py +152 -0
- vllm/v1/worker/gpu_input_batch.py +681 -0
- vllm/v1/worker/gpu_model_runner.py +2320 -0
- vllm/v1/worker/gpu_worker.py +393 -0
- vllm/v1/worker/lora_model_runner_mixin.py +173 -0
- vllm/v1/worker/tpu_model_runner.py +1673 -0
- vllm/v1/worker/tpu_worker.py +299 -0
- vllm/v1/worker/utils.py +111 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +450 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1030 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import contextlib
+import math
+import threading
+import time
+import uuid
+from collections import defaultdict
+from collections.abc import Iterator
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Optional
+
+import msgspec
+import torch
+import zmq
+
+from vllm import envs
+from vllm.attention.selector import backend_name_to_enum, get_attn_backend
+from vllm.config import VllmConfig
+from vllm.distributed.kv_transfer.kv_connector.v1.base import (
+    KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole)
+from vllm.distributed.parallel_state import (
+    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size,
+    get_tp_group)
+from vllm.logger import init_logger
+from vllm.platforms import _Backend
+from vllm.utils import make_zmq_path, make_zmq_socket, round_down
+from vllm.v1.core.sched.output import SchedulerOutput
+from vllm.v1.request import RequestStatus
+
+if TYPE_CHECKING:
+    from vllm.attention.backends.abstract import AttentionMetadata
+    from vllm.forward_context import ForwardContext
+    from vllm.v1.core.kv_cache_manager import KVCacheBlocks
+    from vllm.v1.request import Request
+
+Transfer = tuple[int, float]  # (xfer_handle, start_time)
+GET_META_MSG = b"get_meta_msg"
+
+logger = init_logger(__name__)
+
+# Lazy import nixl_wrapper to avoid loading nixl_bindings if nixl is not used
+try:
+    from nixl._api import nixl_agent as NixlWrapper
+    logger.info("NIXL is available")
+except ImportError:
+    logger.warning("NIXL is not available")
+    NixlWrapper = None
+
+
+class NixlAgentMetadata(
+        msgspec.Struct,
+        omit_defaults=True,  # type: ignore[call-arg]
+        # required for @cached_property.
+        dict=True):
+    engine_id: str
+    agent_metadata: bytes
+    kv_caches_base_addr: list[int]
+    num_blocks: int
+    tp_size: int
+    block_len: int
+    attn_backend_name: str
+
+
+@dataclass
+class ReqMeta:
+    local_block_ids: list[int]
+    remote_block_ids: list[int]
+    remote_host: str
+    remote_port: int
+    remote_engine_id: str
+
+
+class NixlConnectorMetadata(KVConnectorMetadata):
+
+    def __init__(self):
+        self.requests: dict[str, ReqMeta] = {}
+
+    def add_new_req(
+        self,
+        request_id: str,
+        local_block_ids: list[int],
+        kv_transfer_params: dict[str, Any],
+    ):
+        self.requests[request_id] = ReqMeta(
+            local_block_ids=local_block_ids,
+            remote_block_ids=kv_transfer_params["remote_block_ids"],
+            remote_engine_id=kv_transfer_params["remote_engine_id"],
+            remote_host=kv_transfer_params["remote_host"],
+            remote_port=kv_transfer_params["remote_port"],
+        )
+
+
+class NixlConnector(KVConnectorBase_V1):
+
+    def __init__(self, vllm_config: VllmConfig, role: KVConnectorRole):
+        assert vllm_config.kv_transfer_config is not None
+        self.engine_id = vllm_config.kv_transfer_config.engine_id
+
+        if role == KVConnectorRole.SCHEDULER:
+            self.connector_scheduler: Optional[NixlConnectorScheduler] = \
+                NixlConnectorScheduler(vllm_config, str(self.engine_id))
+            self.connector_worker: Optional[NixlConnectorWorker] = None
+        elif role == KVConnectorRole.WORKER:
+            self.connector_scheduler = None
+            self.connector_worker = NixlConnectorWorker(
+                vllm_config, str(self.engine_id))
+
+    ############################################################
+    # Scheduler Side Methods
+    ############################################################
+
+    def get_num_new_matched_tokens(
+            self, request: "Request",
+            num_computed_tokens: int) -> tuple[int, bool]:
+        assert self.connector_scheduler is not None
+        return self.connector_scheduler.get_num_new_matched_tokens(
+            request, num_computed_tokens)
+
+    def update_state_after_alloc(self, request: "Request",
+                                 blocks: "KVCacheBlocks",
+                                 num_external_tokens: int):
+        assert self.connector_scheduler is not None
+        return self.connector_scheduler.update_state_after_alloc(
+            request, blocks, num_external_tokens)
+
+    def build_connector_meta(
+        self,
+        scheduler_output: SchedulerOutput,
+    ) -> KVConnectorMetadata:
+        assert self.connector_scheduler is not None
+        return self.connector_scheduler.build_connector_meta(scheduler_output)
+
+    def request_finished(
+        self,
+        request: "Request",
+        block_ids: list[int],
+    ) -> tuple[bool, Optional[dict[str, Any]]]:
+        assert self.connector_scheduler is not None
+        return self.connector_scheduler.request_finished(request, block_ids)
+
+    ############################################################
+    # Worker Side Methods
+    ############################################################
+    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
+        assert self.connector_worker is not None
+        self.connector_worker.register_kv_caches(kv_caches)
+
+    def get_finished(self,
+                     finished_req_ids: set[str]) -> tuple[set[str], set[str]]:
+        """Get the finished recving and sending requests."""
+        assert self.connector_worker is not None
+        return self.connector_worker.get_finished()
+
+    def start_load_kv(self, forward_context: "ForwardContext",
+                      **kwargs) -> None:
+        assert self.connector_worker is not None
+        assert isinstance(self._connector_metadata, NixlConnectorMetadata)
+        self.connector_worker.start_load_kv(self._connector_metadata)
+
+    def wait_for_layer_load(self, layer_name: str) -> None:
+        """NixlConnector does not do layerwise saving."""
+        pass
+
+    def save_kv_layer(self, layer_name: str, kv_layer: torch.Tensor,
+                      attn_metadata: "AttentionMetadata", **kwargs) -> None:
+        """NixlConnector does not save explicitly."""
+        pass
+
+    def wait_for_save(self):
+        """NixlConnector does not save explicitly."""
+        pass
+
+
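The connector above is role-split: the scheduler half only plans which blocks to pull, while the worker half performs the NIXL transfers. As a minimal usage sketch of the metadata hand-off, not part of the wheel itself; the module path and all field values below are assumptions inferred from the package layout:

# Editor's sketch; hypothetical values throughout, import path assumed.
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
    NixlConnectorMetadata)

meta = NixlConnectorMetadata()
meta.add_new_req(
    request_id="req-0",
    local_block_ids=[0, 1, 2],          # blocks allocated on this decode (D) instance
    kv_transfer_params={
        "remote_block_ids": [7, 8, 9],  # blocks to read from the prefill (P) instance
        "remote_engine_id": "engine-p0",
        "remote_host": "10.0.0.1",
        "remote_port": 5600,
    },
)
assert meta.requests["req-0"].remote_port == 5600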
+class NixlConnectorScheduler:
+    """Implementation of Scheduler side methods"""
+
+    def __init__(self, vllm_config: VllmConfig, engine_id: str):
+        self.vllm_config = vllm_config
+        self.block_size = vllm_config.cache_config.block_size
+        self.engine_id = engine_id
+        self.side_channel_host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST
+        self.side_channel_port = (
+            envs.VLLM_NIXL_SIDE_CHANNEL_PORT +
+            vllm_config.parallel_config.data_parallel_rank_local *
+            vllm_config.parallel_config.tensor_parallel_size)
+        logger.info("Initializing NIXL Scheduler %s", engine_id)
+
+        # Requests that need to start recv.
+        # New requests are added by update_state_after_alloc in
+        # the scheduler. Used to make metadata passed to Worker.
+        self._reqs_need_recv: dict[str, tuple[Request, list[int]]] = {}
+
+    def get_num_new_matched_tokens(
+            self, request: "Request",
+            num_computed_tokens: int) -> tuple[int, bool]:
+        """
+        For remote prefill, pull all prompt blocks from remote
+        asynchronously relative to engine execution.
+
+        Args:
+            request (Request): the request object.
+            num_computed_tokens (int): the number of locally
+                computed tokens for this request
+        Returns:
+            * the number of tokens that can be loaded from the
+              external KV cache beyond what is already computed.
+            * true if the external KV cache tokens will be loaded
+              asynchronously (between scheduler steps).
+        """
+
+        params = request.kv_transfer_params
+        logger.debug(
+            "NIXLConnector get_num_new_matched_tokens: "
+            "num_computed_tokens=%s, kv_transfer_params=%s",
+            num_computed_tokens, params)
+
+        if params is not None and params.get("do_remote_prefill"):
+            # Remote prefill: get all prompt blocks from remote.
+            assert num_computed_tokens % self.block_size == 0
+            rounded_num_prompt_tokens = round_down(
+                len(request.prompt_token_ids), self.block_size)
+            count = max(rounded_num_prompt_tokens - num_computed_tokens, 0)
+            if count > 0:
+                return count, True
+
+        # No remote prefill for this request.
+        return 0, False
+
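Only whole blocks are pulled from the remote prefill instance: the prompt length is rounded down to a block boundary before subtracting what is already cached locally. A hedged numeric sketch of that arithmetic (all values made up):

# Editor's sketch of the token-count math above; numbers are hypothetical.
def round_down(x: int, mult: int) -> int:  # mirrors vllm.utils.round_down
    return (x // mult) * mult

block_size = 16
prompt_len = 300            # 300 tokens -> 18 full blocks = 288 tokens
num_computed_tokens = 32    # 2 blocks already computed locally

count = max(round_down(prompt_len, block_size) - num_computed_tokens, 0)
print(count)  # 256 tokens (16 blocks) to load asynchronously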
+    def update_state_after_alloc(self, request: "Request",
+                                 blocks: "KVCacheBlocks",
+                                 num_external_tokens: int):
+
+        params = request.kv_transfer_params
+        logger.debug(
+            "NIXLConnector update_state_after_alloc: "
+            "num_external_tokens=%s, kv_transfer_params=%s",
+            num_external_tokens, params)
+
+        if params is not None and params.get("do_remote_prefill"):
+            if params.get("remote_block_ids"):
+                if all(p in params for p in ("remote_engine_id", "remote_host",
+                                             "remote_port")):
+                    # If remote_blocks and num_external_tokens = 0, we have
+                    # a full prefix cache hit on the D worker. We need to call
+                    # send_notif in _read_blocks to free the memory on the P.
+                    local_block_ids = (blocks.get_unhashed_block_ids()
+                                       if num_external_tokens > 0 else [])
+                    # Get unhashed blocks to pull from remote.
+                    self._reqs_need_recv[request.request_id] = (
+                        request, local_block_ids)
+                else:
+                    logger.warning(
+                        "Got invalid KVTransferParams: %s. This "
+                        "request will not utilize KVTransfer", params)
+            else:
+                assert num_external_tokens == 0
+            # Only trigger 1 KV transfer per request.
+            params["do_remote_prefill"] = False
+
+    def build_connector_meta(
+        self,
+        scheduler_output: SchedulerOutput,
+    ) -> KVConnectorMetadata:
+        meta = NixlConnectorMetadata()
+
+        # Loop through scheduled reqs and convert to ReqMeta.
+        for req_id, (req, block_ids) in self._reqs_need_recv.items():
+            assert req.kv_transfer_params is not None
+            meta.add_new_req(
+                request_id=req_id,
+                local_block_ids=block_ids,
+                kv_transfer_params=req.kv_transfer_params,
+            )
+
+        # Clear the list once workers start the transfers
+        self._reqs_need_recv.clear()
+
+        return meta
+
+    def request_finished(
+        self,
+        request: "Request",
+        block_ids: list[int],
+    ) -> tuple[bool, Optional[dict[str, Any]]]:
+        """
+        Once a request is finished, determine whether request blocks
+        should be freed now or will be sent asynchronously and freed later.
+        """
+
+        params = request.kv_transfer_params
+        logger.debug(
+            "NIXLConnector request_finished, request_status=%s, "
+            "kv_transfer_params=%s", request.status, params)
+
+        if (params is None or not params.get("do_remote_decode")
+                or request.status != RequestStatus.FINISHED_LENGTH_CAPPED):
+            return False, None
+
+        # Get computed blocks.
+        all_full = request.num_computed_tokens % self.block_size == 0
+        computed_block_ids = block_ids if all_full else block_ids[:-1]
+
+        # If prompt < block_size, no xfer so free blocks immediately.
+        delay_free_blocks = len(computed_block_ids) > 0
+
+        return delay_free_blocks, dict(
+            do_remote_prefill=True,
+            do_remote_decode=False,
+            remote_block_ids=computed_block_ids,
+            remote_engine_id=self.engine_id,
+            remote_host=self.side_channel_host,
+            remote_port=self.side_channel_port,
+        )
+
+
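Both the scheduler above and the worker below derive their side-channel ports the same way: each data-parallel rank gets its own base port, and each tensor-parallel rank within it offsets by its rank, so no two ranks collide. A hedged sketch of the resulting layout (the base port is a made-up value):

# Editor's sketch; VLLM_NIXL_SIDE_CHANNEL_PORT=5600 is hypothetical.
base = 5600
tp_size = 4

for dp_rank_local in range(2):
    dp_base = base + dp_rank_local * tp_size
    for tp_rank in range(tp_size):
        print(f"dp={dp_rank_local} tp={tp_rank} -> port {dp_base + tp_rank}")
# dp=0 uses ports 5600..5603, dp=1 uses 5604..5607: disjoint ranges.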
+class NixlConnectorWorker:
+    """Implementation of Worker side methods"""
+
+    def __init__(self, vllm_config: VllmConfig, engine_id: str):
+        if NixlWrapper is None:
+            logger.error("NIXL is not available")
+            raise RuntimeError("NIXL is not available")
+        logger.info("Initializing NIXL wrapper")
+        logger.info("Initializing NIXL worker %s", engine_id)
+
+        # Config.
+        self.vllm_config = vllm_config
+        self.block_size = vllm_config.cache_config.block_size
+
+        # Agent.
+        self.nixl_wrapper = NixlWrapper(str(uuid.uuid4()), None)
+        # Map of engine_id -> {rank0: agent_name0, rank1: agent_name1..}.
+        self._remote_agents: dict[str, dict[int, str]] = defaultdict(dict)
+
+        # NIXL handshake port.
+        # NOTE(rob): Within a DP group, each DP rank gets its own
+        # base port (which is sent in the KVTransferParams).
+        # Each TP rank listens/queries on the base_port + tp_rank.
+        self.side_channel_port = (
+            envs.VLLM_NIXL_SIDE_CHANNEL_PORT +
+            vllm_config.parallel_config.data_parallel_rank_local *
+            vllm_config.parallel_config.tensor_parallel_size)
+
+        # Metadata.
+        self.engine_id = engine_id
+        self.tp_rank = get_tensor_model_parallel_rank()
+        self.world_size = get_tensor_model_parallel_world_size()
+        self.tp_group = get_tp_group()
+
+        # KV Caches and nixl tracking data.
+        self.kv_caches: dict[str, torch.Tensor] = {}
+
+        # Map of engine_id -> kv_caches_base_addr. For TP case, each local
+        # rank will still only pull from a single remote TP worker.
+        self.kv_caches_base_addr: dict[str, list[int]] = {}
+
+        # Number of NIXL regions. Currently one region per cache
+        # (so 1 per layer for MLA, otherwise 2 per layer)
+        self.num_regions = 0
+        self.num_layers = 0
+
+        # nixl_prepped_dlist_handle.
+        self.src_xfer_side_handle: int = 0
+        # Map of engine_id -> nixl_prepped_dlist_handle (int)].
+        self.dst_xfer_side_handles: dict[str, int] = {}
+
+        # Map of engine_id -> num_blocks. All ranks in the same deployment will
+        # have the same number of blocks.
+        self.dst_num_blocks: dict[str, int] = {}
+        self._registered_descs: list[Any] = []
+
+        # In progress transfers.
+        # [req_id -> list[handle]]
+        self._recving_transfers = defaultdict[str, list[Transfer]](list)
+
+        # Complete transfer tracker. Used by the rank 0 to track finished
+        # transactions on ranks 1 to N-1.
+        # [req_id -> count]
+        self._done_recving_count: defaultdict[str,
+                                              int] = defaultdict(lambda: 0)
+        self._done_sending_count: defaultdict[str,
+                                              int] = defaultdict(lambda: 0)
+
+        # Background thread for establishing new connections.
+        self._nixl_handshake_listener_t: Optional[threading.Thread] = None
+
+        self.vllm_config = vllm_config
+        self.block_size = vllm_config.cache_config.block_size
+        self.model_config = vllm_config.model_config
+        self.cache_config = vllm_config.cache_config
+
+        # TODO(mgoin): remove this once we have hybrid memory allocator
+        # Optimization for models with local attention (Llama 4)
+        # List of block window sizes for each layer for local attention
+        self.block_window_per_layer: list[Optional[int]] = []
+        self.use_mla = self.model_config.use_mla
+
+        backend = get_attn_backend(self.model_config.get_head_size(),
+                                   self.model_config.dtype,
+                                   self.cache_config.cache_dtype,
+                                   self.block_size,
+                                   self.model_config.is_attention_free,
+                                   use_mla=self.use_mla)
+        self.backend_name = backend.get_name()
+        attn_backend = backend_name_to_enum(self.backend_name)
+        self._use_flashinfer = attn_backend == _Backend.FLASHINFER_VLLM_V1
+        logger.debug("Detected attention backend %s", self.backend_name)
+
+        self._tp_size: dict[str, int] = {self.engine_id: self.world_size}
+        # With heterogeneous TP, P must wait for all assigned D TP workers to
+        # finish reading before safely freeing the blocks.
+        self.consumer_notification_counts_by_req = defaultdict[str, int](int)
+
+    @staticmethod
+    def _nixl_handshake_listener(metadata: NixlAgentMetadata,
+                                 ready_event: threading.Event, base_port: int,
+                                 tp_rank: int):
+        """Background thread for getting new NIXL handshakes."""
+        # NOTE(rob): this is a simple implementation. We will move
+        # to a better approach via HTTP endpoint soon.
+
+        encoder = msgspec.msgpack.Encoder()
+        encoded_data = encoder.encode(metadata)
+        size_in_bytes = len(encoded_data)
+        logger.debug("Size of encoded NixlAgentMetadata: %s bytes",
+                     str(size_in_bytes))
+
+        # Listen for new requests for metadata.
+        host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST
+        path = make_zmq_path("tcp", host, base_port + tp_rank)
+        logger.debug("Starting listening on path: %s", path)
+        with zmq_ctx(zmq.ROUTER, path) as sock:
+            ready_event.set()
+            while True:
+                identity, _, msg = sock.recv_multipart()
+                if msg != GET_META_MSG:
+                    logger.warning(
+                        "Connection listener got unexpected message %s", msg)
+                sock.send_multipart((identity, b"", encoded_data))
+
+    def _nixl_handshake(self, host: str, port: int):
+        """Do a NIXL handshake with a remote instance."""
+
+        start_time = time.perf_counter()
+
+        # NOTE(rob): we need each rank to have a unique port. This is
+        # a hack to keep us moving. We will switch when moving to etcd
+        # or where we have a single ZMQ socket in the scheduler.
+
+        def handshake(path: str, rank: int) -> NixlAgentMetadata:
+            # Send query for the request.
+            with zmq_ctx(zmq.REQ, path) as sock:
+                sock.send(GET_META_MSG)
+                metadata_bytes = sock.recv()
+                decoder = msgspec.msgpack.Decoder(NixlAgentMetadata)
+                metadata = decoder.decode(metadata_bytes)
+                got_metadata_time = time.perf_counter()
+
+                # Register Remote agent.
+                self.add_remote_agent(metadata, rank)
+                setup_agent_time = time.perf_counter()
+
+                logger.debug("NIXL handshake: get metadata took: %s",
+                             got_metadata_time - start_time)
+                logger.debug("NIXL handshake: add agent took: %s",
+                             setup_agent_time - got_metadata_time)
+                return metadata
+
+        # Handshake with remote agent-rank0 first to get the tp_size of remote
+        path = make_zmq_path("tcp", host, port)
+        logger.debug("Querying master rank metadata on path: %s", path)
+        metadata = handshake(path, 0)
+
+        # Handshake only with the other TP remote the current local rank will
+        # pull from. With homogeneous TP it happens to be the same rank_i.
+        tp_ratio = self._tp_size[self.engine_id] // metadata.tp_size
+        p_remote_rank = self.tp_rank // tp_ratio
+        if p_remote_rank > 0:
+            path = make_zmq_path("tcp", host, port + p_remote_rank)
+            logger.debug("Querying metadata on path: %s at remote rank %s",
+                         path, p_remote_rank)
+            _ = handshake(path, p_remote_rank)
+
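The handshake above is a plain request-reply exchange: a REQ socket sends GET_META_MSG, a ROUTER socket replies with msgpack-encoded agent metadata. A self-contained sketch of that round trip using only pyzmq and msgspec (the port and the AgentMeta stand-in struct are hypothetical; no vLLM or NIXL is needed to run it):

# Editor's sketch of the REQ/ROUTER metadata exchange; values made up.
import threading

import msgspec
import zmq

GET_META_MSG = b"get_meta_msg"

class AgentMeta(msgspec.Struct):  # stand-in for NixlAgentMetadata
    engine_id: str
    tp_size: int

def listener(ctx: zmq.Context, ready: threading.Event):
    sock = ctx.socket(zmq.ROUTER)
    sock.bind("tcp://127.0.0.1:5601")  # hypothetical side-channel port
    ready.set()
    # REQ frames arrive as [identity, empty delimiter, payload].
    identity, _, msg = sock.recv_multipart()
    assert msg == GET_META_MSG
    payload = msgspec.msgpack.encode(AgentMeta(engine_id="p0", tp_size=4))
    sock.send_multipart((identity, b"", payload))
    sock.close()

ctx = zmq.Context()
ready = threading.Event()
threading.Thread(target=listener, args=(ctx, ready), daemon=True).start()
ready.wait()

req = ctx.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:5601")
req.send(GET_META_MSG)
meta = msgspec.msgpack.decode(req.recv(), type=AgentMeta)
print(meta.engine_id, meta.tp_size)  # p0 4
req.close()
ctx.term()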
|
485
|
+
    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """Register the KV Cache data in nixl."""

        _, first_kv_cache = next(iter(kv_caches.items()))
        kv_elem_size = first_kv_cache.element_size()

        # TODO(tms): Find a more robust way to detect and handle MLA
        # NOTE (NickLucche) To move blocks efficiently with NIXL, the expected
        # KV memory layout is HND, as opposed to the default NHD. Note that it
        # will only affect the strides. For MLA, instead, we require no
        # such thing and resort to the standard layout.
        use_mla = len(first_kv_cache.shape) == 3
        assert use_mla == self.use_mla

        # TODO (NickLucche) not compatible with hybrid allocator. Enforce check
        # once it goes live, as a single kv layout is expected for xfers.
        if use_mla:
            # MLA case.
            self.num_blocks = first_kv_cache.shape[0]
            block_rank = 2  # [block_size, latent_dim]
            block_shape = first_kv_cache.shape[-block_rank:]
            block_size, kv_latent_dim = block_shape
            self.slot_size_bytes = kv_elem_size * kv_latent_dim
        else:
            # [2 (k and v), num_blocks, ...]
            if self._use_flashinfer:
                # FlashInfer swaps 2<->num_blocks dimensions.
                self.num_blocks = first_kv_cache.shape[0]
                block_rank = 4  # [2, block_size, kv_heads, head_dim]
            else:
                self.num_blocks = first_kv_cache.shape[1]
                block_rank = 3  # [block_size, kv_heads, head_dim]
            block_shape = first_kv_cache.shape[-block_rank:]
            block_size, n_kv_heads, head_dim = block_shape[-3:]
            # head size in bytes.
            self.slot_size_bytes = kv_elem_size * n_kv_heads * head_dim
        assert block_size == self.block_size
        # TODO(tms): self.block_len needs to be per-layer for sliding window,
        # hybrid attn, etc
        # block size in bytes
        self.block_len = kv_elem_size * math.prod(block_shape)
        logger.info(
            "Registering KV_Caches: use_mla: %s, num_blocks: %s, "
            "block_shape: %s, per_layer_kv_cache_shape: %s", use_mla,
            self.num_blocks, block_shape, first_kv_cache.shape)
        self.dst_num_blocks[self.engine_id] = self.num_blocks
        self.kv_caches = kv_caches
        kv_caches_base_addr = []
        caches_data = []

        # Note(tms): I modified this from the original region setup code.
        # K and V are now in different regions. Advantage is that we can
        # elegantly support MLA and any cases where the K and V tensors
        # are non-contiguous (it's not locally guaranteed that they will be).
        # Disadvantage is that the encoded NixlAgentMetadata is now larger
        # (roughly 8KB vs 5KB).
        # Conversely for FlashInfer, K and V are transferred in the same tensor
        # to better exploit the memory layout (ie num_blocks is the first dim).
        for cache_or_caches in kv_caches.values():
            # Normalize to always be a list of caches
            cache_list = [cache_or_caches] if use_mla or self._use_flashinfer \
                else cache_or_caches
            for cache in cache_list:
                base_addr = cache.data_ptr()
                region_len = self.num_blocks * self.block_len
                caches_data.append(
                    (base_addr, region_len, cache.device.index, ""))
                kv_caches_base_addr.append(base_addr)
        self.kv_caches_base_addr[self.engine_id] = kv_caches_base_addr
        self.num_regions = len(caches_data)
        self.num_layers = len(self.kv_caches.keys())

        # TODO(mgoin): remove this once we have hybrid memory allocator
        # Optimization for models with local attention (Llama 4)
        if self.vllm_config.model_config.hf_config.model_type == "llama4":
            from transformers import Llama4TextConfig
            assert isinstance(self.vllm_config.model_config.hf_text_config,
                              Llama4TextConfig)
            llama4_config = self.vllm_config.model_config.hf_text_config
            no_rope_layers = llama4_config.no_rope_layers
            chunk_size = llama4_config.attention_chunk_size
            chunk_block_size = math.ceil(chunk_size / self.block_size)
            for layer_idx in range(self.num_layers):
                # no_rope_layers[layer_idx] == 0 means NoPE (global)
                # Any other value means RoPE (local chunked)
                is_local_attention = no_rope_layers[layer_idx] != 0
                block_window = chunk_block_size if is_local_attention else None
                self.block_window_per_layer.append(block_window)
            logger.debug("Llama 4 block window per layer mapping: %s",
                         self.block_window_per_layer)
            assert len(self.block_window_per_layer) == self.num_layers

        descs = self.nixl_wrapper.get_reg_descs(caches_data, "VRAM")
        logger.debug("Registering descs: %s", caches_data)
        self.nixl_wrapper.register_memory(descs)
        logger.debug("Done registering descs")
        self._registered_descs.append(descs)

        # Register local/src descr for NIXL xfer.
        blocks_data = []
        for base_addr in self.kv_caches_base_addr[self.engine_id]:
            # NOTE With heter-TP, more blocks are prepared than what are
            # needed as self.num_blocks >= nixl_agent_meta.num_blocks. We
            # could create fewer, but then _get_block_descs_ids needs to
            # select agent_meta.num_blocks instead of self.num_blocks for
            # local descr, and that makes handling regular flow less clean.
            for block_id in range(self.num_blocks):
                block_offset = block_id * self.block_len
                addr = base_addr + block_offset
                # (addr, len, device id)
                blocks_data.append((addr, self.block_len, self.tp_rank))
        logger.debug("Created %s blocks for src engine %s and rank %s",
                     len(blocks_data), self.engine_id, self.tp_rank)

        descs = self.nixl_wrapper.get_xfer_descs(blocks_data, "VRAM")
        # NIXL_INIT_AGENT to be used for preparations of local descs.
        self.src_xfer_side_handle = self.nixl_wrapper.prep_xfer_dlist(
            "NIXL_INIT_AGENT", descs)

        # After KV Caches registered, listen for new connections.
        metadata = NixlAgentMetadata(
            engine_id=self.engine_id,
            agent_metadata=self.nixl_wrapper.get_agent_metadata(),
            kv_caches_base_addr=self.kv_caches_base_addr[self.engine_id],
            num_blocks=self.num_blocks,
            tp_size=self.world_size,
            block_len=self.block_len,
            attn_backend_name=self.backend_name)
        ready_event = threading.Event()
        self._nixl_handshake_listener_t = threading.Thread(
            target=self._nixl_handshake_listener,
            args=(metadata, ready_event, self.side_channel_port, self.tp_rank),
            daemon=True,
            name="nixl_handshake_listener")
        self._nixl_handshake_listener_t.start()
        ready_event.wait()
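
    # Worked example of the sizing math in register_kv_caches, using assumed
    # toy numbers rather than a real model config: a non-MLA, non-FlashInfer
    # per-layer cache of shape [2, 1024, 16, 8, 128] in bf16 (2 bytes per
    # element) gives block_shape = (16, 8, 128), so:
    #
    #     kv_elem_size = 2
    #     slot_size_bytes = 2 * 8 * 128    # 2048 bytes per token slot
    #     block_len = 2 * 16 * 8 * 128     # 32768 bytes per block
    #     region_len = 1024 * 32768        # bytes per registered K (or V) region
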
    def add_remote_agent(self,
                         nixl_agent_meta: NixlAgentMetadata,
                         remote_tp_rank: int = 0):
        """
        Add the remote NIXL agent and prepare the descriptors for reading cache
        blocks from remote.

        In particular, handle both homogeneous and heterogeneous TP. The former
        requires local rank_i to read from remote rank_i.
        The latter, assuming D.world_size > P.world_size, requires that two or
        more local TP workers share the xfer from a single TP worker.

        Here's an example:

        rank_offset     p_remote_tp_rank
        (kv split no)
        --------------------------------
            0                 0      Worker0  ---- 1st half of KV ----> Worker0  [ KV Cache ]
                                                                       /
            1                 0      Worker1  ---- 2nd half of KV ----/

            0                 1      Worker2  ---- 1st half of KV ----> Worker1  [ KV Cache ]
                                                                       /
            1                 1      Worker3  ---- 2nd half of KV ----/


                        Decoder TP workers                     Prefill TP workers
                          (world_size=4)                         (world_size=2)
                                         tp_ratio = 4 // 2 = 2

        Considering the KV Caches, if P-Worker_i has cache size [2, num_blocksP, kv_heads, block_size, head_dim]
        then D-Worker_j has [2, num_blocksD, kv_heads//tp_ratio, block_size, head_dim]. Mind the "HND" layout format.
        Assuming num_blocksD >= num_blocksP, D-Worker0 reads from P-Worker0 by preparing the kv_heads//tp_ratio
        first heads from all the slots of all the blocks. D-Worker1 will do the same, but reading the second split
        along the kv_heads dimension, and so forth until "tp_ratio" D TP workers have pulled from P-Worker0.

        Note that the above also holds for the homogeneous TP case, where tp_ratio evaluates to 1.

        In the MLA case, the cache is replicated across TP workers, so rank_offset is always 0
        and the whole cache is shared by "tp_ratio" D TP workers.
        """  # noqa: E501
        engine_id = nixl_agent_meta.engine_id
        # TODO re-evaluate refreshing for scaling/recovery
        if remote_tp_rank in self._remote_agents.get(engine_id, ()):
            return

        if engine_id in self._tp_size:
            assert self._tp_size[engine_id] == nixl_agent_meta.tp_size
        else:
            self._tp_size[engine_id] = nixl_agent_meta.tp_size
        # We may eventually enable this after asserting equality in cache
        # layout and close outputs.
        assert nixl_agent_meta.attn_backend_name == self.backend_name

        self._remote_agents[engine_id][
            remote_tp_rank] = self.nixl_wrapper.add_remote_agent(
                nixl_agent_meta.agent_metadata)

        # Number of D TP workers reading from a single P TP worker. This is
        # 1 when P and D `--tensor-parallel-size` match.
        assert self._tp_size[self.engine_id] % self._tp_size[engine_id] == 0, (
            "Local TP size must be divisible by remote TP size.")
        tp_ratio = self._tp_size[self.engine_id] // self._tp_size[engine_id]
        assert tp_ratio > 0, "Decode TP cannot be smaller than prefill TP"
        if self.use_mla:
            # With MLA the only difference is in the number of blocks.
            remote_block_size = nixl_agent_meta.block_len // (
                self.slot_size_bytes)
            assert self.block_len == nixl_agent_meta.block_len
        else:
            remote_block_size = nixl_agent_meta.block_len // (
                self.slot_size_bytes * tp_ratio)
            if self._use_flashinfer:
                # Account for joint KV in FlashInfer.
                remote_block_size //= 2

            assert nixl_agent_meta.block_len == self.block_len * tp_ratio, (
                "Remote P worker KV layer cache must be of shape [2, N, "
                "local_kv_heads*tp_ratio, block_size, head_dim] and same dtype."
            )

        assert self.block_size == remote_block_size, "Remote P worker with " \
            "different block size is not supported"

        assert self.num_blocks >= nixl_agent_meta.num_blocks

        # Create dst descs and xfer side handles. TP workers have same #blocks.
        if engine_id in self.dst_num_blocks:
            assert self.dst_num_blocks[engine_id] == nixl_agent_meta.num_blocks
        else:
            self.dst_num_blocks[engine_id] = nixl_agent_meta.num_blocks

        blocks_data = []
        # With homogeneous TP, D pulls the whole kv cache from the
        # corresponding rank. With heterogeneous TP, prepare the descriptors
        # by splitting the P KV cache along the kv_head dim, in chunks of the
        # D worker's kv_head size (D>P).
        # Eg. PTP1 DTP2 => P0 KV:[block0-KV_0 | block0-KV_1..].
        p_remote_tp_rank = self.tp_rank // tp_ratio
        # Only register the remote's descriptors if current rank pulls from it.
        if p_remote_tp_rank == remote_tp_rank:
            self.kv_caches_base_addr[
                engine_id] = nixl_agent_meta.kv_caches_base_addr
            rank_offset = self.tp_rank % tp_ratio * self.block_len \
                if not self.use_mla else 0
            # Register all remote blocks, but only the corresponding kv heads.
            for base_addr in nixl_agent_meta.kv_caches_base_addr:
                for block_id in range(nixl_agent_meta.num_blocks):
                    block_offset = block_id * nixl_agent_meta.block_len
                    # For each block, grab the heads chunk belonging to rank_i
                    # of size remote_nheads // tp_ratio, which corresponds to
                    # self.block_len == remote_block_len//tp_ratio bytes.
                    addr = base_addr + block_offset + rank_offset
                    # (addr, len, device id)
                    blocks_data.append((addr, self.block_len, remote_tp_rank))
            logger.debug(
                "Created %s blocks for dst engine %s with remote rank %s and "
                "local rank %s", len(blocks_data), engine_id, remote_tp_rank,
                self.tp_rank)

            # Register with NIXL.
            descs = self.nixl_wrapper.get_xfer_descs(blocks_data, "VRAM")
            self.dst_xfer_side_handles[
                engine_id] = self.nixl_wrapper.prep_xfer_dlist(
                    self._remote_agents[engine_id][remote_tp_rank], descs)
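
    # Worked example of the heterogeneous-TP split in add_remote_agent, with
    # assumed sizes: decode TP=4 pulling from prefill TP=2 gives tp_ratio = 2,
    # and each remote (P) block is tp_ratio times longer than a local (D)
    # block. For local rank 3:
    #
    #     tp_ratio = 4 // 2                              # 2
    #     p_remote_tp_rank = 3 // tp_ratio               # 1 -> pulls from P worker 1
    #     rank_offset = (3 % tp_ratio) * self.block_len  # second kv_heads split
    #     # addr = base_addr + block_id * remote_block_len + rank_offset
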
    def get_finished(self) -> tuple[set[str], set[str]]:
        """
        Get requests that are done sending or recving.

        In a TP>1 setup, each rank exchanges KVs with its counterpart
        ranks independently. get_finished() runs in a worker and builds
        the done_sending and done_recving sets that are sent to the
        scheduler via ModelRunnerOutput by Rank 0. To ensure transfers
        are done before adding to finished, Ranks 1 to N-1 communicate
        to Rank 0 once their transfer is done, and Rank 0 returns the
        finished sets to the scheduler only once all ranks are done.
        """
        done_sending = self._get_new_notifs()
        done_recving = self._pop_done_transfers(self._recving_transfers)
        if len(done_sending) > 0 or len(done_recving) > 0:
            logger.debug(
                "Rank %s, get_finished: %s requests done sending "
                "and %s requests done recving", self.tp_rank,
                len(done_sending), len(done_recving))

        if self.world_size == 1:
            return done_sending, done_recving

        # Rank 0: get finished from all other ranks.
        if self.tp_rank == 0:
            for req_id in done_sending:
                self._done_sending_count[req_id] += 1
            for req_id in done_recving:
                self._done_recving_count[req_id] += 1

            # Keep track of how many other ranks have finished.
            other_ranks_finished_ids: list[str] = []
            for i in range(1, self.world_size):
                other_ranks_finished_ids.extend(
                    self.tp_group.recv_object(src=i))
            for req_id in other_ranks_finished_ids:
                if (req_id in self._done_recving_count
                        or req_id in self._recving_transfers):
                    self._done_recving_count[req_id] += 1
                else:
                    self._done_sending_count[req_id] += 1

            # Return ids that finished on all ranks to the scheduler.
            all_done_recving: set[str] = set()
            for req_id in list(self._done_recving_count.keys()):
                if self._done_recving_count[req_id] == self.world_size:
                    del self._done_recving_count[req_id]
                    all_done_recving.add(req_id)

            all_done_sending: set[str] = set()
            for req_id in list(self._done_sending_count.keys()):
                if self._done_sending_count[req_id] == self.world_size:
                    del self._done_sending_count[req_id]
                    all_done_sending.add(req_id)

            return all_done_sending, all_done_recving

        # Ranks 1 to N-1: send finished ids to Rank 0.
        else:
            finished_req_ids = list(done_recving.union(done_sending))
            self.tp_group.send_object(finished_req_ids, dst=0)

            # Unused as only Rank 0 results are sent to scheduler.
            return done_sending, done_recving
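
    # The rank-0 aggregation above is per-request counting: an id is released
    # to the scheduler only once all world_size ranks have reported it. A
    # minimal standalone sketch of that invariant (toy ids):
    #
    #     from collections import defaultdict
    #     world_size, counts = 2, defaultdict(int)
    #     for report in (["req-a"], ["req-a", "req-b"]):  # one list per rank
    #         for req_id in report:
    #             counts[req_id] += 1
    #     finished = {r for r, c in counts.items() if c == world_size}
    #     # finished == {"req-a"}; "req-b" waits for the remaining rank
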
    def _get_new_notifs(self) -> set[str]:
        """
        Get req_ids which got a remote xfer message. When multiple consumers
        are reading from the same producer (heterogeneous TP scenario), wait
        for all consumers to be done pulling.
        """
        notified_req_ids: set[str] = set()
        for notifs in self.nixl_wrapper.get_new_notifs().values():
            for notif in notifs:
                req_id, tp_ratio = notif.decode("utf-8").rsplit(":", 1)
                self.consumer_notification_counts_by_req[req_id] += 1
                # Wait for all consumers (D) to be done reading before freeing.
                if self.consumer_notification_counts_by_req[req_id] == int(
                        tp_ratio):
                    notified_req_ids.add(req_id)
                    del self.consumer_notification_counts_by_req[req_id]
        return notified_req_ids
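
    # Notifications are "<req_id>:<tp_ratio>" byte strings, so a producer
    # knows how many consumers must pull before its blocks can be freed.
    # Decoding sketch with a toy payload:
    #
    #     notif = b"req-42:2"
    #     req_id, tp_ratio = notif.decode("utf-8").rsplit(":", 1)
    #     # req_id == "req-42"; free only once int(tp_ratio) == 2 reads landed
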
    def _pop_done_transfers(
            self, transfers: dict[str, list[tuple[int, float]]]) -> set[str]:
        """
        Pop completed xfers by checking for DONE state.
        Args:
            transfers: dict of req_id -> list[running_xfer]
        Returns:
            set of req_ids that have all done xfers
        """
        done_req_ids: set[str] = set()
        for req_id, handles in list(transfers.items()):
            for handle, xfer_stime in handles:
                xfer_state = self.nixl_wrapper.check_xfer_state(handle)
                if xfer_state == "DONE":
                    self.nixl_wrapper.release_xfer_handle(handle)
                    done_req_ids.add(req_id)
                    del transfers[req_id]
                elif xfer_state == "PROC":
                    continue
                else:
                    raise RuntimeError(
                        f"Transfer failed with state {xfer_state}")
        return done_req_ids
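
    # Each handle above is polled as a small state machine: "PROC" means the
    # transfer is still in flight, "DONE" means it is complete and the handle
    # can be released, and anything else is treated as a failure. Sketch over
    # toy states (not the real NIXL wrapper API):
    #
    #     for handle, state in {"h1": "DONE", "h2": "PROC"}.items():
    #         if state == "DONE":
    #             pass      # release the handle, mark the request finished
    #         elif state == "PROC":
    #             continue  # check again in the next step()
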
    def start_load_kv(self, metadata: NixlConnectorMetadata):
        """
        Start loading by triggering non-blocking nixl_xfer.
        We check for these transfers to complete in each step().
        """
        for req_id, meta in metadata.requests.items():
            logger.debug(
                "start_load_kv for request %s from remote engine %s. "
                "Num local_block_ids: %s. Num remote_block_ids: %s. ", req_id,
                meta.remote_engine_id, len(meta.local_block_ids),
                len(meta.remote_block_ids))
            self._read_blocks(
                request_id=req_id,
                dst_engine_id=meta.remote_engine_id,
                local_block_ids=meta.local_block_ids,
                remote_block_ids=meta.remote_block_ids,
                remote_host=meta.remote_host,
                remote_port=meta.remote_port,
            )
    def _read_blocks(
        self,
        local_block_ids: list[int],
        remote_block_ids: list[int],
        remote_host: str,
        remote_port: int,
        dst_engine_id: str,
        request_id: str,
    ):
        # NOTE(rob): this takes ~2s. We need to get this off the hotpath.
        if dst_engine_id not in self._remote_agents:
            self._nixl_handshake(remote_host, remote_port)

        # NOTE(rob): having the staging blocks be on the READER side is
        # not going to work well (since we will have to call rearrange
        # tensors) after we detect the txn is complete (which means we
        # cannot make the read txn async easily). If we want to make "READ"
        # happen cleanly, then we will need to have the staging blocks on
        # the remote side.

        # NOTE(rob): according to nvidia the staging blocks are used to
        # saturate IB with heterogeneous TP sizes. We should remove the staging
        # blocks until we are ready.

        # Number of D TP workers that will read from dst P. Propagate tp_ratio
        # on notification so that dst worker can wait before freeing blocks.
        tp_ratio = self._tp_size[
            self.engine_id] // self._tp_size[dst_engine_id]
        notif_id = f"{request_id}:{tp_ratio}".encode()

        # Full prefix cache hit: do not need to read remote blocks,
        # just notify P worker that we have the blocks we need.
        num_local_blocks = len(local_block_ids)
        if num_local_blocks == 0:
            remote_rank = self.tp_rank // tp_ratio
            agent_name = self._remote_agents[dst_engine_id][remote_rank]
            self.nixl_wrapper.send_notif(agent_name, notif_msg=notif_id)
            return

        # Partial prefix cache hit: just read uncomputed blocks.
        num_remote_blocks = len(remote_block_ids)
        assert num_local_blocks <= num_remote_blocks
        if num_local_blocks < num_remote_blocks:
            remote_block_ids = remote_block_ids[-num_local_blocks:]

        # Get side handles.
        local_xfer_side_handle = self.src_xfer_side_handle
        remote_xfer_side_handle = self.dst_xfer_side_handles[dst_engine_id]

        # NOTE (nicolo) With homogeneous TP, each TP worker loads KV from
        # the corresponding rank. With heterogeneous TP, fixing D>P, the D tp
        # workers will issue xfers to parts of the P worker remote kv caches.

        # Get descs ids.
        local_block_descs_ids: list[int] = []
        remote_block_descs_ids: list[int] = []
        if not self.block_window_per_layer:
            # Default case: assume global attention
            remote_block_descs_ids = self._get_block_descs_ids(
                dst_engine_id, remote_block_ids)
            local_block_descs_ids = self._get_block_descs_ids(
                self.engine_id, local_block_ids)
        else:
            # TODO(mgoin): remove this once we have hybrid memory allocator
            # Optimization for models with local attention (Llama 4)
            for layer_idx, block_window in enumerate(
                    self.block_window_per_layer):
                # For each layer:
                if block_window is None:
                    # If not chunked, we just use the
                    # full block lists (global attention)
                    layer_local_block_ids = local_block_ids
                    layer_remote_block_ids = remote_block_ids
                else:
                    # If chunked, get the last block_window blocks
                    layer_local_block_ids = local_block_ids[-block_window:]
                    layer_remote_block_ids = remote_block_ids[-block_window:]

                # Get descs ids for the layer.
                layer_local_desc_ids = self._get_block_descs_ids(
                    self.engine_id, layer_local_block_ids, layer_idx)
                layer_remote_desc_ids = self._get_block_descs_ids(
                    dst_engine_id, layer_remote_block_ids, layer_idx)

                local_block_descs_ids.extend(layer_local_desc_ids)
                remote_block_descs_ids.extend(layer_remote_desc_ids)

        assert len(local_block_descs_ids) == len(remote_block_descs_ids)

        # Prepare transfer with Nixl.
        handle = self.nixl_wrapper.make_prepped_xfer(
            "READ",
            local_xfer_side_handle,
            local_block_descs_ids,
            remote_xfer_side_handle,
            remote_block_descs_ids,
            notif_msg=notif_id,
        )

        # Begin async xfer.
        self.nixl_wrapper.transfer(handle)

        # Use handle to check completion in future step().
        # TODO (NickLucche) surface xfer elapsed time
        self._recving_transfers[request_id].append(
            (handle, time.perf_counter()))
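
    # The partial prefix-cache-hit path in _read_blocks aligns the tail of
    # the remote block list with the blocks still missing locally. Sketch
    # with toy ids:
    #
    #     remote_block_ids = [10, 11, 12, 13]
    #     local_block_ids = [5, 6]                   # only 2 blocks to fill
    #     remote_block_ids = remote_block_ids[-2:]   # -> [12, 13]
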
    def _get_block_descs_ids(self,
                             engine_id: str,
                             block_ids: list[int],
                             layer_idx: Optional[int] = None) -> list[int]:
        """
        Get the descs ids for a set of block ids.
        If layer_idx is provided, we use the region_ids for the given layer.
        Otherwise, we use all regions.
        """
        if layer_idx is None:
            region_ids = range(self.num_regions)
        else:
            assert layer_idx < self.num_layers
            if self.num_layers < self.num_regions:
                # If we have more regions than layers, we assume that
                # the regions are organized as [K0, V0, K1, V1, ...]
                # and we select K_i and V_i
                assert 2 * self.num_layers == self.num_regions
                region_ids = range(2 * layer_idx, 2 * layer_idx + 2)
            else:
                # Otherwise, we assume we have MLA and select i-th layer
                assert self.num_layers == self.num_regions
                region_ids = range(layer_idx, layer_idx + 1)

        num_blocks = self.dst_num_blocks[engine_id]

        # Compute the desc ids for each block.
        descs_ids: list[int] = []
        for reg_id in region_ids:
            for block_id in block_ids:
                descs_ids.append(reg_id * num_blocks + block_id)
        return descs_ids
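
    # Desc ids flatten a [region, block] grid as reg_id * num_blocks +
    # block_id. With assumed toy sizes num_blocks = 100 and layer-0 regions
    # [K0, V0]:
    #
    #     descs_ids = [reg_id * 100 + 7 for reg_id in (0, 1)]  # [7, 107]
    #     # K0 block 7 -> 7, V0 block 7 -> 107
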


@contextlib.contextmanager
def zmq_ctx(socket_type: Any, addr: str) -> Iterator[zmq.Socket]:
    """Context manager for a ZMQ socket"""

    if socket_type not in (zmq.ROUTER, zmq.REQ):
        raise ValueError(f"Unexpected socket type: {socket_type}")

    ctx: Optional[zmq.Context] = None
    try:
        ctx = zmq.Context()  # type: ignore[attr-defined]
        yield make_zmq_socket(ctx=ctx,
                              path=addr,
                              socket_type=socket_type,
                              bind=socket_type == zmq.ROUTER)
    finally:
        if ctx is not None:
            ctx.destroy(linger=0)
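
# Usage sketch for zmq_ctx (the address is illustrative; real paths are
# built with make_zmq_path, as in the handshake code above):
#
#     with zmq_ctx(zmq.REQ, "tcp://10.0.0.1:5600") as sock:
#         sock.send(GET_META_MSG)
#         reply = sock.recv()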