vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +53 -0
- vllm/_custom_ops.py +1828 -0
- vllm/_ipex_ops.py +244 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +115 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +308 -0
- vllm/attention/backends/blocksparse_attn.py +461 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
- vllm/attention/backends/flash_attn.py +1003 -0
- vllm/attention/backends/flashinfer.py +1104 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +313 -0
- vllm/attention/backends/ipex_attn.py +398 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1385 -0
- vllm/attention/backends/pallas.py +351 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +975 -0
- vllm/attention/backends/torch_sdpa.py +703 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +802 -0
- vllm/attention/layer.py +468 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +906 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +979 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +334 -0
- vllm/attention/selector.py +187 -0
- vllm/attention/utils/fa_utils.py +55 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1185 -0
- vllm/benchmarks/endpoint_request_func.py +381 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1135 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +563 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +544 -0
- vllm/compilation/counter.py +38 -0
- vllm/compilation/cuda_piecewise_backend.py +214 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +618 -0
- vllm/compilation/fx_utils.py +62 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +137 -0
- vllm/compilation/pass_manager.py +78 -0
- vllm/compilation/sequence_parallelism.py +268 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +67 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4746 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +521 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +135 -0
- vllm/core/placeholder_block_space_manager.py +100 -0
- vllm/core/scheduler.py +2093 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +176 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1296 -0
- vllm/distributed/tpu_distributed_utils.py +177 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1708 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2097 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +317 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1299 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +39 -0
- vllm/entrypoints/cli/benchmark/latency.py +30 -0
- vllm/entrypoints/cli/benchmark/main.py +54 -0
- vllm/entrypoints/cli/benchmark/serve.py +30 -0
- vllm/entrypoints/cli/benchmark/throughput.py +30 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +65 -0
- vllm/entrypoints/cli/openai.py +205 -0
- vllm/entrypoints/cli/run_batch.py +62 -0
- vllm/entrypoints/cli/serve.py +328 -0
- vllm/entrypoints/cli/types.py +25 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1544 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1387 -0
- vllm/entrypoints/openai/cli_args.py +315 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +1913 -0
- vllm/entrypoints/openai/run_batch.py +463 -0
- vllm/entrypoints/openai/serving_chat.py +1221 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +592 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +986 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +232 -0
- vllm/entrypoints/openai/serving_score.py +433 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +424 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +944 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +179 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +909 -0
- vllm/inputs/registry.py +237 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +85 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +259 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +152 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +369 -0
- vllm/model_executor/layers/fused_moe/__init__.py +54 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
- vllm/model_executor/layers/fused_moe/layer.py +1535 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/utils.py +98 -0
- vllm/model_executor/layers/layernorm.py +288 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1524 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +350 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +194 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +151 -0
- vllm/model_executor/layers/quantization/bitblas.py +461 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +196 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +906 -0
- vllm/model_executor/layers/quantization/gguf.py +565 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +261 -0
- vllm/model_executor/layers/quantization/modelopt.py +737 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +441 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +161 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +1862 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +95 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +76 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +600 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
- vllm/model_executor/model_loader/tpu.py +112 -0
- vllm/model_executor/model_loader/utils.py +302 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +28 -0
- vllm/model_executor/models/adapters.py +248 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +657 -0
- vllm/model_executor/models/aya_vision.py +466 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +543 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +523 -0
- vllm/model_executor/models/bert_with_rope.py +769 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +718 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1136 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +472 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +269 -0
- vllm/model_executor/models/deepseek_v2.py +843 -0
- vllm/model_executor/models/deepseek_vl2.py +648 -0
- vllm/model_executor/models/eagle.py +260 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/florence2.py +1103 -0
- vllm/model_executor/models/fuyu.py +389 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +533 -0
- vllm/model_executor/models/gemma3_mm.py +709 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4v.py +648 -0
- vllm/model_executor/models/gpt2.py +328 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +779 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +586 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +546 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +776 -0
- vllm/model_executor/models/interfaces.py +572 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1418 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +532 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +866 -0
- vllm/model_executor/models/llava_next.py +586 -0
- vllm/model_executor/models/llava_next_video.py +471 -0
- vllm/model_executor/models/llava_onevision.py +956 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +308 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +759 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +364 -0
- vllm/model_executor/models/mistral3.py +604 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1624 -0
- vllm/model_executor/models/mllama4.py +938 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +331 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1568 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +573 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +567 -0
- vllm/model_executor/models/paligemma.py +398 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +723 -0
- vllm/model_executor/models/phi4mm.py +1246 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +665 -0
- vllm/model_executor/models/pixtral.py +1316 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +497 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
- vllm/model_executor/models/qwen2_5_vl.py +1166 -0
- vllm/model_executor/models/qwen2_audio.py +410 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +132 -0
- vllm/model_executor/models/qwen2_vl.py +1405 -0
- vllm/model_executor/models/qwen3.py +321 -0
- vllm/model_executor/models/qwen3_moe.py +535 -0
- vllm/model_executor/models/qwen_vl.py +785 -0
- vllm/model_executor/models/registry.py +622 -0
- vllm/model_executor/models/roberta.py +276 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +643 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +508 -0
- vllm/model_executor/models/ultravox.py +656 -0
- vllm/model_executor/models/utils.py +731 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +747 -0
- vllm/model_executor/models/zamba2.py +1009 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +77 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +106 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +118 -0
- vllm/multimodal/image.py +97 -0
- vllm/multimodal/inputs.py +876 -0
- vllm/multimodal/parse.py +461 -0
- vllm/multimodal/processing.py +1895 -0
- vllm/multimodal/profiling.py +258 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +436 -0
- vllm/multimodal/video.py +198 -0
- vllm/outputs.py +512 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +266 -0
- vllm/platforms/cuda.py +526 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +538 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +435 -0
- vllm/platforms/tpu.py +216 -0
- vllm/platforms/xpu.py +156 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +54 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +887 -0
- vllm/transformers_utils/configs/__init__.py +61 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/h2ovl.py +16 -0
- vllm/transformers_utils/configs/internvl.py +54 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +258 -0
- vllm/transformers_utils/configs/nvlm_d.py +15 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +50 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +256 -0
- vllm/utils.py +2910 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +163 -0
- vllm/v1/attention/backends/flash_attn.py +869 -0
- vllm/v1/attention/backends/flashinfer.py +651 -0
- vllm/v1/attention/backends/flex_attention.py +477 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +931 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
- vllm/v1/attention/backends/mla/flashmla.py +152 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
- vllm/v1/attention/backends/mla/triton_mla.py +120 -0
- vllm/v1/attention/backends/pallas.py +240 -0
- vllm/v1/attention/backends/triton_attn.py +285 -0
- vllm/v1/attention/backends/utils.py +52 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +150 -0
- vllm/v1/core/kv_cache_coordinator.py +363 -0
- vllm/v1/core/kv_cache_manager.py +392 -0
- vllm/v1/core/kv_cache_utils.py +996 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +154 -0
- vllm/v1/core/sched/scheduler.py +1044 -0
- vllm/v1/core/sched/utils.py +23 -0
- vllm/v1/core/single_type_kv_cache_manager.py +403 -0
- vllm/v1/engine/__init__.py +173 -0
- vllm/v1/engine/async_llm.py +558 -0
- vllm/v1/engine/coordinator.py +253 -0
- vllm/v1/engine/core.py +961 -0
- vllm/v1/engine/core_client.py +1129 -0
- vllm/v1/engine/detokenizer.py +261 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +317 -0
- vllm/v1/engine/logprobs.py +199 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +428 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +407 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +537 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +194 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +523 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +239 -0
- vllm/v1/outputs.py +116 -0
- vllm/v1/request.py +193 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +59 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +286 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +432 -0
- vllm/v1/spec_decode/medusa.py +62 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +46 -0
- vllm/v1/structured_output/__init__.py +222 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +743 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +86 -0
- vllm/v1/worker/cpu_worker.py +152 -0
- vllm/v1/worker/gpu_input_batch.py +681 -0
- vllm/v1/worker/gpu_model_runner.py +2320 -0
- vllm/v1/worker/gpu_worker.py +393 -0
- vllm/v1/worker/lora_model_runner_mixin.py +173 -0
- vllm/v1/worker/tpu_model_runner.py +1673 -0
- vllm/v1/worker/tpu_worker.py +299 -0
- vllm/v1/worker/utils.py +111 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +450 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
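The long run of JSON entries above follows a single naming scheme: each file encodes its tuned-kernel lookup key directly in the filename, namely the GEMM shape (N, K), the accelerator (device_name), the quantization scheme (dtype, fp8_w8a8 or int8_w8a8 here), and the quantization block shape. A minimal sketch of assembling such a filename is shown below; config_file_name is a hypothetical helper for illustration only, not an API of this wheel, whose actual lookup logic lives in the quantization utils modules listed above and may differ.

# Hypothetical helper, illustrative only: rebuilds the filename convention
# used by the tuned-kernel JSON configs listed above.
def config_file_name(N: int, K: int, device_name: str, dtype: str,
                     block_shape: list[int]) -> str:
    block = str(block_shape).replace(" ", "")  # [128, 128] -> "[128,128]"
    return (f"N={N},K={K},device_name={device_name},"
            f"dtype={dtype},block_shape={block}.json")

# config_file_name(576, 7168, "NVIDIA_H200", "fp8_w8a8", [128, 128])
# reproduces one of the entries listed above.

The hunk that follows is the start of one of the added files; from its imports and class names it appears to be vLLM's Ray-based distributed executor module.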
@@ -0,0 +1,701 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+import asyncio
+import json
+import os
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
+
+import cloudpickle
+import msgspec
+
+import vllm.envs as envs
+from vllm.executor.executor_base import (
+    DistributedExecutorBase)  # yapf: disable
+from vllm.executor.msgspec_utils import encode_hook
+from vllm.executor.ray_utils import (RayWorkerWrapper, initialize_ray_cluster,
+                                     ray)
+from vllm.logger import init_logger
+from vllm.model_executor.layers.sampler import SamplerOutput
+from vllm.platforms import current_platform
+from vllm.sequence import ExecuteModelRequest
+from vllm.utils import (_run_task_with_lock, get_distributed_init_method,
+                        get_ip, get_open_port, make_async)
+
+if ray is not None:
+    from ray.actor import ActorHandle
+    from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
+else:
+    ActorHandle = None
+
+if TYPE_CHECKING:
+    from ray.util.placement_group import PlacementGroup
+
+logger = init_logger(__name__)
+
+
+@dataclass
+class RayWorkerMetaData:
+    """
+    Metadata for a Ray worker.
+    The order of ray worker creation can be random,
+    and we need to reset the rank after creating all workers.
+    """
+    worker: ActorHandle
+    created_rank: int
+    adjusted_rank: int = -1
+    ip: str = ""
+
+
+class RayDistributedExecutor(DistributedExecutorBase):
+    """Ray-based distributed executor"""
+
+    # These env vars are worker-specific, therefore are NOT copied
+    # from the driver to the workers
+    WORKER_SPECIFIC_ENV_VARS = {
+        "VLLM_HOST_IP", "VLLM_HOST_PORT", "LOCAL_RANK", "CUDA_VISIBLE_DEVICES"
+    }
+
+    config_home = envs.VLLM_CONFIG_ROOT
+    # This file contains a list of env vars that should not be copied
+    # from the driver to the Ray workers.
+    non_carry_over_env_vars_file = os.path.join(
+        config_home, "ray_non_carry_over_env_vars.json")
+    if os.path.exists(non_carry_over_env_vars_file):
+        with open(non_carry_over_env_vars_file) as f:
+            non_carry_over_env_vars = set(json.load(f))
+    else:
+        non_carry_over_env_vars = set()
+
+    uses_ray: bool = True
+
+    def _init_executor(self) -> None:
+        self.forward_dag: Optional[ray.dag.CompiledDAG] = None
+        if envs.VLLM_USE_V1:
+            # V1 uses SPMD worker and compiled DAG
+            os.environ["VLLM_USE_RAY_SPMD_WORKER"] = "1"
+            os.environ["VLLM_USE_RAY_COMPILED_DAG"] = "1"
+
+            # For TPU, avoid compiling NVIDIA's NCCL
+            if current_platform.is_tpu():
+                os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm"
+
+        # If the env var is set, it uses Ray's compiled DAG API,
+        # which optimizes the control plane overhead.
+        # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it.
+        # Currently, this requires USE_RAY_SPMD_WORKER=True.
+        self.use_ray_compiled_dag = envs.VLLM_USE_RAY_COMPILED_DAG
+        # If the env var is set, then we do not distinguish between the
+        # "driver worker" vs other workers. Also, the rank 0 worker will
+        # be executed in a remote Ray worker. Currently this requires
+        # USE_RAY_COMPILED_DAG=True.
+        self.use_ray_spmd_worker = envs.VLLM_USE_RAY_SPMD_WORKER
+        if self.use_ray_compiled_dag:
+            assert self.use_ray_spmd_worker, (
+                "VLLM_USE_RAY_COMPILED_DAG=1 requires "
+                "VLLM_USE_RAY_SPMD_WORKER=1")
+        if self.use_ray_spmd_worker:
+            # TODO: Support SPMD worker for non-DAG Ray executor.
+            assert self.use_ray_compiled_dag, (
+                "VLLM_USE_RAY_SPMD_WORKER=1 requires "
+                "VLLM_USE_RAY_COMPILED_DAG=1")
+
+        assert self.uses_ray
+        initialize_ray_cluster(self.parallel_config)
+        placement_group = self.parallel_config.placement_group
+
+        # Disable Ray usage stats collection.
+        ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0")
+        if ray_usage != "1":
+            os.environ["RAY_USAGE_STATS_ENABLED"] = "0"
+
+        # Create the parallel GPU workers.
+        self._init_workers_ray(placement_group)
+
+        self.input_encoder = msgspec.msgpack.Encoder(enc_hook=encode_hook)
+        self.output_decoder = msgspec.msgpack.Decoder(
+            Optional[List[SamplerOutput]])
+        self.use_v1 = envs.VLLM_USE_V1
+
+        self.pp_locks: Optional[List[asyncio.Lock]] = None
+        if not self.use_ray_compiled_dag:
+            self.driver_exec_method = make_async(
+                self.driver_worker.execute_method)
+
+    def shutdown(self) -> None:
+        logger.info(
+            "Shutting down Ray distributed executor. If you see an error log "
+            "from logging.cc regarding SIGTERM received, please ignore it, "
+            "because this is the expected termination process in Ray.")
+        if hasattr(self, "forward_dag") and self.forward_dag is not None:
+            self.forward_dag.teardown()
+            import ray
+            for worker in self.workers:
+                ray.kill(worker)
+            self.forward_dag = None
+
+    def _configure_ray_workers_use_nsight(self,
+                                          ray_remote_kwargs) -> Dict[str, Any]:
+        # If nsight profiling is enabled, we need to set the profiling
+        # configuration for the ray workers as runtime env.
+        runtime_env = ray_remote_kwargs.setdefault("runtime_env", {})
+        runtime_env.update({
+            "nsight": {
+                "t": "cuda,cudnn,cublas",
+                "o": "'worker_process_%p'",
+                "cuda-graph-trace": "node",
+            }
+        })
+
+        return ray_remote_kwargs
+
+    # A child class could overwrite this to return actual env vars.
+    def _get_env_vars_to_be_updated(self):
+        return self._env_vars_for_all_workers
+
+    def _init_workers_ray(self, placement_group: "PlacementGroup",
+                          **ray_remote_kwargs):
+        num_gpus = envs.VLLM_RAY_PER_WORKER_GPUS
+
+        # The driver dummy worker does not actually use any resources.
+        # It holds the resource for the driver worker.
+        self.driver_dummy_worker: Optional[RayWorkerWrapper] = None
+        # The remaining workers are the actual ray actors.
+        self.workers: List[RayWorkerWrapper] = []
+
+        # Used in ray compiled DAG: indexed first by PP rank,
+        # and then TP rank. In other words, the inner list is
+        # the TP group of workers for a PP rank.
+        self.pp_tp_workers: List[List[RayWorkerWrapper]] = []
+
+        if self.parallel_config.ray_workers_use_nsight:
+            ray_remote_kwargs = self._configure_ray_workers_use_nsight(
+                ray_remote_kwargs)
+
+        logger.info("use_ray_spmd_worker: %s", self.use_ray_spmd_worker)
+
+        # Create the workers.
+        bundle_indices: List[int]
+        if envs.VLLM_RAY_BUNDLE_INDICES:
+            # Use the bundle indices specified by the user.
+            bundle_indices = list(
+                map(int, envs.VLLM_RAY_BUNDLE_INDICES.split(",")))
+            assert len(bundle_indices) == self.parallel_config.world_size, \
+                ("VLLM_RAY_BUNDLE_INDICES must have the same size"
+                 f" as the world size, but got {bundle_indices=} "
+                 f"and {self.parallel_config.world_size=}")
+            assert len(set(bundle_indices)) == len(bundle_indices), \
+                ("VLLM_RAY_BUNDLE_INDICES cannot have duplicate values,"
+                 f" but got {bundle_indices=}")
+        else:
+            # Use the first N bundles that have GPU resources.
+            bundle_indices = []
+            for bundle_id, bundle in enumerate(placement_group.bundle_specs):
+                if bundle.get(current_platform.ray_device_key, 0):
+                    bundle_indices.append(bundle_id)
+            bundle_indices = bundle_indices[:self.parallel_config.world_size]
+
+        worker_metadata: List[RayWorkerMetaData] = []
+        driver_ip = get_ip()
+        for rank, bundle_id in enumerate(bundle_indices):
+            scheduling_strategy = PlacementGroupSchedulingStrategy(
+                placement_group=placement_group,
+                placement_group_capture_child_tasks=True,
+                placement_group_bundle_index=bundle_id,
+            )
+
+            if current_platform.ray_device_key == "GPU":
+                # NV+AMD GPUs, and Intel XPUs
+                worker = ray.remote(
+                    num_cpus=0,
+                    num_gpus=num_gpus,
+                    scheduling_strategy=scheduling_strategy,
+                    **ray_remote_kwargs,
+                )(RayWorkerWrapper).remote(vllm_config=self.vllm_config,
+                                           rpc_rank=rank)
+            else:
+                worker = ray.remote(
+                    num_cpus=0,
+                    num_gpus=0,
+                    resources={current_platform.ray_device_key: num_gpus},
+                    scheduling_strategy=scheduling_strategy,
+                    **ray_remote_kwargs,
+                )(RayWorkerWrapper).remote(vllm_config=self.vllm_config,
+                                           rpc_rank=rank)
+            worker_metadata.append(
+                RayWorkerMetaData(worker=worker, created_rank=rank))
+
+        worker_ips = ray.get([
+            each.worker.get_node_ip.remote()  # type: ignore[attr-defined]
+            for each in worker_metadata
+        ])
+
+        for each, ip in zip(worker_metadata, worker_ips):
+            each.ip = ip
+
+        if not self.use_ray_spmd_worker:
+            for i, each in enumerate(worker_metadata):
+                # Find and remove the dummy worker from the list.
+                worker = each.worker
+                worker_ip = each.ip
+                if self.driver_dummy_worker is None and worker_ip == driver_ip:
+                    # If the worker is on the same node as the driver, we use it
+                    # as the resource holder for the driver process.
+                    self.driver_dummy_worker = worker
+                    self.driver_worker = RayWorkerWrapper(
+                        vllm_config=self.vllm_config, rpc_rank=0)
+                    worker_metadata.pop(i)
+                    break
+
+        logger.debug("workers: %s", worker_metadata)
+        logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker)
+        if not self.use_ray_spmd_worker and self.driver_dummy_worker is None:
+            raise ValueError(
+                "Ray does not allocate any GPUs on the driver node. "
+                f"Driver IP: {driver_ip}, worker IPs: {worker_ips}. "
+                "Consider adjusting the Ray placement group or running "
+                "the driver on a GPU node.")
+
+        ip_counts: Dict[str, int] = {}
+        for ip in worker_ips:
+            ip_counts[ip] = ip_counts.get(ip, 0) + 1
+
+        def sort_by_driver_then_worker_ip(item: RayWorkerMetaData):
+            """
+            Sort the workers based on 3 properties:
+            1. If the worker is on the same node as the driver (vllm engine),
+               it should be placed first.
+            2. Then, if the worker is on a node with fewer workers, it should
+               be placed first.
+            3. Finally, if the worker is on a node with a smaller IP address,
+               it should be placed first.
+            """
+            ip = item.ip
+            return (0 if ip == driver_ip else 1, ip_counts[ip], ip)
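Because the key above is a tuple, Python's lexicographic tuple ordering applies the three properties in sequence. A small worked example with invented addresses (the driver is on 10.0.0.5, and 10.0.0.2 hosts two workers):

# Illustrative only: driver-node workers sort first, then nodes with fewer
# workers, then the smaller IP string.
driver_ip = "10.0.0.5"
ip_counts = {"10.0.0.5": 1, "10.0.0.9": 1, "10.0.0.2": 2}
key = lambda ip: (0 if ip == driver_ip else 1, ip_counts[ip], ip)
sorted(["10.0.0.2", "10.0.0.9", "10.0.0.5"], key=key)
# -> ['10.0.0.5', '10.0.0.9', '10.0.0.2']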
+
+        # After sorting, the workers on the same node will be
+        # close to each other, and the workers on the driver
+        # node will be placed first.
+        sorted_worker_metadata = sorted(worker_metadata,
+                                        key=sort_by_driver_then_worker_ip)
+        start_rank = 0 if self.use_ray_spmd_worker else 1
+        for i, item in enumerate(sorted_worker_metadata):
+            item.adjusted_rank = i + start_rank
+        self.workers = [item.worker for item in sorted_worker_metadata]
+        rerank_mapping = {
+            item.created_rank: item.adjusted_rank
+            for item in sorted_worker_metadata
+        }
+        self._run_workers("adjust_rank", rerank_mapping)
+
+        # Get the set of GPU IDs used on each node.
+        worker_node_and_gpu_ids = []
+        for worker in [self.driver_dummy_worker] + self.workers:
+            if worker is None:
+                # driver_dummy_worker can be None when using ray spmd worker.
+                continue
+            worker_node_and_gpu_ids.append(
+                ray.get(worker.get_node_and_gpu_ids.remote()) \
+            )  # type: ignore
+
+        node_workers = defaultdict(list)  # node id -> list of worker ranks
+        node_gpus = defaultdict(list)  # node id -> list of gpu ids
+
+        for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids):
+            node_workers[node_id].append(i)
+            # `gpu_ids` can be a list of strings or integers.
+            # Convert them to integers for consistency.
+            # NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs),
+            # so string sorting is not sufficient.
+            # See https://github.com/vllm-project/vllm/issues/5590
+            gpu_ids = [int(x) for x in gpu_ids]
+            node_gpus[node_id].extend(gpu_ids)
+        for node_id, gpu_ids in node_gpus.items():
+            node_gpus[node_id] = sorted(gpu_ids)
+
+        all_ips = set(worker_ips + [driver_ip])
+        n_ips = len(all_ips)
+        n_nodes = len(node_workers)
+
+        if n_nodes != n_ips:
+            raise RuntimeError(
+                f"Every node should have a unique IP address. Got {n_nodes}"
+                f" nodes with node ids {list(node_workers.keys())} and "
+                f"{n_ips} unique IP addresses {all_ips}. Please check your"
+                " network configuration. If you set the `VLLM_HOST_IP`"
+                " environment variable, make sure it is unique for"
+                " each node.")
+
+        # Set environment variables for the driver and workers.
+        all_args_to_update_environment_variables = [{
+            current_platform.device_control_env_var:
+            ",".join(map(str, node_gpus[node_id])),
+        } for (node_id, _) in worker_node_and_gpu_ids]
+
+        # Environment variables to copy from driver to workers.
+        env_vars_to_copy = [
+            v for v in envs.environment_variables
+            if v not in self.WORKER_SPECIFIC_ENV_VARS
+            and v not in self.non_carry_over_env_vars
+        ]
+
+        env_vars_to_copy.extend(current_platform.additional_env_vars)
+
+        # Copy existing env vars to each worker's args.
+        for args in all_args_to_update_environment_variables:
+            # TODO: refactor platform-specific env vars
+            for name in env_vars_to_copy:
+                if name in os.environ:
+                    args[name] = os.environ[name]
+
+        logger.info("non_carry_over_env_vars from config: %s",
+                    self.non_carry_over_env_vars)
+        logger.info(
+            "Copying the following environment variables to workers: %s",
+            [v for v in env_vars_to_copy if v in os.environ])
+        logger.info(
+            "If certain env vars should NOT be copied to workers, add them to "
+            "the %s file.", self.non_carry_over_env_vars_file)
+
+        self._env_vars_for_all_workers = (
+            all_args_to_update_environment_variables)
+
+        self._run_workers("update_environment_variables",
+                          self._get_env_vars_to_be_updated())
+
+        if len(node_gpus) == 1:
+            # In the single-node case, we don't need to get the IP address;
+            # the loopback address is sufficient.
+            # NOTE: a node may have several IP addresses, one for each
+            # network interface. `get_ip()` might return any of them,
+            # while they might not work for communication inside the node
+            # if the network setup is complicated. Using the loopback address
+            # solves this issue, as it always works for communication inside
+            # the node.
+            driver_ip = "127.0.0.1"
+        distributed_init_method = get_distributed_init_method(
+            driver_ip, get_open_port())
+
+        # Initialize the actual workers inside worker wrapper.
+        all_kwargs = []
+        for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids):
+            local_rank = node_workers[node_id].index(rank)
+            kwargs = dict(
+                vllm_config=self.vllm_config,
+                local_rank=local_rank,
+                rank=rank,
+                distributed_init_method=distributed_init_method,
+                is_driver_worker=(not self.parallel_config)
+                or (rank % self.parallel_config.tensor_parallel_size == 0),
+            )
+            all_kwargs.append(kwargs)
+        self._run_workers("init_worker", all_kwargs)
+
+        self._run_workers("init_device")
+        self._run_workers("load_model",
+                          max_concurrent_workers=self.parallel_config.
+                          max_parallel_loading_workers)
+
+        if self.use_ray_spmd_worker:
+            for pp_rank in range(self.parallel_config.pipeline_parallel_size):
+                self.pp_tp_workers.append([])
+                for tp_rank in range(
+                        self.parallel_config.tensor_parallel_size):
+                    # PP=2, TP=4
+                    # pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]]
+                    rank = (pp_rank * self.parallel_config.tensor_parallel_size
+                            ) + tp_rank
+                    assert len(self.pp_tp_workers[pp_rank]) == tp_rank
+                    assert pp_rank < len(self.pp_tp_workers)
+                    self.pp_tp_workers[pp_rank].append(self.workers[rank])
+
+        # This is the list of workers that are rank 0 of each TP group EXCEPT
+        # global rank 0. These are the workers that will broadcast to the
+        # rest of the workers.
+        self.tp_driver_workers: List[RayWorkerWrapper] = []
+        # This is the list of workers that are not drivers and not the first
+        # worker in a TP group. These are the workers that will be
+        # broadcasted to.
+        self.non_driver_workers: List[RayWorkerWrapper] = []
+
+        # Enforce rank order so the correct rank returns the final output.
+        for index, worker in enumerate(self.workers):
+            # The driver worker is rank 0 and not in self.workers.
+            rank = index + 1
+            if rank % self.parallel_config.tensor_parallel_size == 0:
+                self.tp_driver_workers.append(worker)
+            else:
+                self.non_driver_workers.append(worker)
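For the PP=2, TP=4 layout used in the comments above, the rank arithmetic can be checked without Ray; global rank 0 is the driver and is absent from self.workers:

# Illustrative only: with tensor_parallel_size=4, rank 4 opens the second
# TP group and becomes a TP driver; the rest are broadcast targets.
tp_size = 4
worker_ranks = list(range(1, 8))
tp_drivers = [r for r in worker_ranks if r % tp_size == 0]   # [4]
non_drivers = [r for r in worker_ranks if r % tp_size != 0]  # [1, 2, 3, 5, 6, 7]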
+
+    def _driver_execute_model(
+        self, execute_model_req: Optional[ExecuteModelRequest]
+    ) -> Optional[List[SamplerOutput]]:
+        """Run execute_model in the driver worker.
+
+        Passing None will cause the driver to stop the model execution
+        loop running in each of the remote workers.
+        """
+        assert not self.use_ray_spmd_worker, (
+            "driver_worker does not exist for VLLM_USE_RAY_SPMD_WORKER=1")
+        return self.driver_worker.execute_method("execute_model",
+                                                 execute_model_req)
+
+    def execute_model(
+            self,
+            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
+        if not self.use_ray_spmd_worker:
+            return super().execute_model(execute_model_req)
+
+        if self.forward_dag is None:
+            self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)
+
+        if self.use_v1:
+            serialized_data = execute_model_req
+        else:
+            serialized_data = self.input_encoder.encode(execute_model_req)
+        outputs = ray.get(self.forward_dag.execute(serialized_data))
+        if self.use_v1:
+            output = outputs[0]
+        else:
+            output = self.output_decoder.decode(outputs[0])
+        return output
+
+    def _run_workers(
+        self,
+        method: Union[str, Callable],
+        *args,
+        async_run_tensor_parallel_workers_only: bool = False,
+        max_concurrent_workers: Optional[int] = None,
+        **kwargs,
+    ) -> Any:
+        """Runs the given method on all workers.
+
+        Args:
+        - async_run_tensor_parallel_workers_only: If True, the method will be
+          run only in the remote TP workers, not the driver worker.
+          It will also be run asynchronously and return a list of futures
+          rather than blocking on the results.
+        - args/kwargs: All workers share the same args/kwargs.
+        """
+        if isinstance(method, str):
+            sent_method = method
+        else:
+            sent_method = cloudpickle.dumps(method)
+        del method
+        if self.use_ray_spmd_worker:
+            assert not async_run_tensor_parallel_workers_only, (
+                "async_run_tensor_parallel_workers_only is not supported for "
+                "spmd mode.")
+
+        if max_concurrent_workers:
+            raise NotImplementedError(
+                "max_concurrent_workers is not supported yet.")
+
+        # Start the ray workers first.
+        ray_workers = self.workers
+        if async_run_tensor_parallel_workers_only:
+            ray_workers = self.non_driver_workers
+        ray_worker_outputs = [
+            worker.execute_method.remote(sent_method, *args, **kwargs)
+            for worker in ray_workers
+        ]
+
+        if async_run_tensor_parallel_workers_only:
+            # Just return futures.
+            return ray_worker_outputs
+
+        driver_worker_output = []
+        # In SPMD mode, the driver worker is the same as any other worker,
+        # so we only explicitly execute on the driver worker if using a
+        # non-SPMD worker class.
+        if not self.use_ray_spmd_worker:
+            # Start the driver worker after all the ray workers.
+            driver_worker_output = [
+                self.driver_worker.execute_method(sent_method, *args, **kwargs)
+            ]
+
+        # Get the results of the ray workers.
+        if self.workers:
+            ray_worker_outputs = ray.get(ray_worker_outputs)
+
+        return driver_worker_output + ray_worker_outputs
|
|
525
|
+
|
|
526
|
+
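
    # Illustrative call sites (hypothetical, not from this diff) showing
    # the two modes of _run_workers:
    #
    #     # Blocking broadcast: run on the driver plus every Ray worker
    #     # and collect all results.
    #     results = executor._run_workers("init_device")
    #
    #     # Fire-and-forget on the non-driver TP workers only; returns
    #     # Ray object refs to be awaited later.
    #     futures = executor._run_workers(
    #         "start_worker_execution_loop",
    #         async_run_tensor_parallel_workers_only=True)
    #     executor._wait_for_tasks_completion(futures)
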
    def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None:
        """Wait for futures returned from _run_workers() with
        async_run_tensor_parallel_workers_only=True to complete."""
        ray.get(parallel_worker_tasks)

    def _check_ray_cgraph_installation(self):
        import importlib.metadata

        from packaging import version

        required_version = version.parse("2.43.0")
        current_version = version.parse(importlib.metadata.version("ray"))
        if current_version < required_version:
            raise ValueError(f"Ray version {required_version} is "
                             f"required, but found {current_version}")

        import importlib.util
        cgraph_spec = importlib.util.find_spec(
            "ray.experimental.compiled_dag_ref")
        if cgraph_spec is None:
            raise ValueError("Ray Compiled Graph is not installed. "
                             "Run `pip install ray[cgraph]` to install it.")

        cupy_spec = importlib.util.find_spec("cupy")
        if (cupy_spec is None
                and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE == "nccl"):
            raise ValueError(
                "cupy is not installed but required since "
                "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE is set to 'nccl'. "
                "Run `pip install ray[cgraph]` and check cupy installation.")
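
    # A standalone sketch (names are illustrative, not from the upstream
    # source) of the same minimum-dependency-version gate used above,
    # applicable to any installed distribution:
    #
    #     import importlib.metadata
    #     from packaging import version
    #
    #     def require(dist: str, minimum: str) -> None:
    #         installed = version.parse(importlib.metadata.version(dist))
    #         if installed < version.parse(minimum):
    #             raise ValueError(
    #                 f"{dist}>={minimum} is required, found {installed}")
    #
    #     require("ray", "2.43.0")  # mirrors _check_ray_cgraph_installation
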
    def _compiled_ray_dag(self, enable_asyncio: bool):
        assert self.parallel_config.use_ray
        self._check_ray_cgraph_installation()
        from ray.dag import InputNode, MultiOutputNode

        logger.info("VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s",
                    envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE)
        logger.info("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s",
                    envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM)

        channel_type = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
        if channel_type not in ("auto", "nccl", "shm"):
            raise ValueError(
                "Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: "
                f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'.")

        # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300
        # seconds (it is 10 seconds by default). This Ray environment
        # variable controls the timeout for getting a result from a
        # compiled graph execution, i.e., for vLLM, the distributed
        # execution that includes the model forward runs and the
        # intermediate tensor communications.
        os.environ.setdefault("RAY_CGRAPH_get_timeout", "300")  # noqa: SIM112
        logger.info("RAY_CGRAPH_get_timeout is set to %s",
                    os.environ["RAY_CGRAPH_get_timeout"])  # noqa: SIM112

        with InputNode() as input_data:
            # Example DAG: PP=2, TP=4
            #
            # For V0:
            # ExecuteModelRequest -> 0 -> (ExecuteModelReq, IntermediateTensors) -> 4 -> SamplerOutput # noqa: E501
            # ExecuteModelRequest -> 1 -> (ExecuteModelReq, IntermediateTensors) -> 5 -> SamplerOutput # noqa: E501
            # ExecuteModelRequest -> 2 -> (ExecuteModelReq, IntermediateTensors) -> 6 -> SamplerOutput # noqa: E501
            # ExecuteModelRequest -> 3 -> (ExecuteModelReq, IntermediateTensors) -> 7 -> SamplerOutput # noqa: E501
            #
            # For V1:
            # SchedulerOutput -> 0 -> (SchedulerOutput, IntermediateTensors) -> 4 -> ModelRunnerOutput # noqa: E501
            # SchedulerOutput -> 1 -> (SchedulerOutput, IntermediateTensors) -> 5 -> ModelRunnerOutput # noqa: E501
            # SchedulerOutput -> 2 -> (SchedulerOutput, IntermediateTensors) -> 6 -> ModelRunnerOutput # noqa: E501
            # SchedulerOutput -> 3 -> (SchedulerOutput, IntermediateTensors) -> 7 -> ModelRunnerOutput # noqa: E501

            # All workers in the first TP group will take in the
            # ExecuteModelRequest as input.
            outputs = [input_data for _ in self.pp_tp_workers[0]]
            for pp_rank, tp_group in enumerate(self.pp_tp_workers):
                # Each PP worker takes in the output of the previous PP
                # worker, and the TP group executes in SPMD fashion.
                if self.use_v1:
                    outputs = [
                        worker.execute_model_ray.
                        bind(  # type: ignore[attr-defined]
                            outputs[i]) for i, worker in enumerate(tp_group)
                    ]
                else:
                    outputs = [
                        worker.execute_model_spmd.
                        bind(  # type: ignore[attr-defined]
                            outputs[i]) for i, worker in enumerate(tp_group)
                    ]

                last_pp_rank = len(self.pp_tp_workers) - 1
                if (pp_rank < last_pp_rank and
                        envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE != "shm"):
                    # Specify how intermediate tensors should be passed
                    # between PP stages; there is no need to specify this
                    # for the last PP stage or when using shared memory
                    # (the default).
                    transport = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
                    outputs = [
                        output.with_tensor_transport(transport=transport)
                        for output in outputs
                    ]

            forward_dag = MultiOutputNode(outputs)

        return forward_dag.experimental_compile(
            enable_asyncio=enable_asyncio,
            _overlap_gpu_communication=envs.
            VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM)
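
    # A minimal sketch of the Ray Compiled Graph API used above, hedged:
    # EchoActor is a made-up actor, and experimental_compile is an
    # experimental Ray API that may change between releases.
    #
    #     import ray
    #     from ray.dag import InputNode, MultiOutputNode
    #
    #     @ray.remote
    #     class EchoActor:
    #         def echo(self, x):
    #             return x
    #
    #     actors = [EchoActor.remote() for _ in range(2)]
    #     with InputNode() as inp:
    #         dag = MultiOutputNode([a.echo.bind(inp) for a in actors])
    #     compiled = dag.experimental_compile()
    #     assert ray.get(compiled.execute("hello")) == ["hello", "hello"]
    #
    # The method above is the same shape, except that each PP stage's
    # bind() consumes the previous stage's outputs instead of the raw
    # input, and NCCL transport can be requested for the inter-stage
    # tensors.
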
    def __del__(self):
        self.shutdown()

    async def execute_model_async(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        if not self.use_ray_spmd_worker:
            return await super().execute_model_async(execute_model_req)

        if self.forward_dag is None:
            self.forward_dag = self._compiled_ray_dag(enable_asyncio=True)

        serialized_data = self.input_encoder.encode(execute_model_req)
        dag_future = await self.forward_dag.execute_async(serialized_data)
        output = await dag_future[0]
        return self.output_decoder.decode(output)
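
    # Hedged note (not from the upstream source) on the double await
    # above: with enable_asyncio=True, awaiting execute_async submits the
    # input and yields one future per DAG output node; the second await
    # resolves the future for output 0, mirroring outputs[0] in the
    # synchronous path:
    #
    #     futures = await compiled.execute_async(data)  # submit input
    #     first = await futures[0]                      # resolve output 0
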
    async def _driver_execute_model_async(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> List[SamplerOutput]:
        assert not self.use_ray_spmd_worker, (
            "driver_worker does not exist for VLLM_USE_RAY_SPMD_WORKER=1")
        if not self.tp_driver_workers:
            return await self.driver_exec_method("execute_model",
                                                 execute_model_req)
        if self.pp_locks is None:
            # This locks each pipeline parallel stage so that multiple
            # virtual engines can't execute on the same stage at the same
            # time. We create the locks here instead of in the constructor,
            # which uses a different asyncio loop.
            self.pp_locks = [
                asyncio.Lock()
                for _ in range(self.parallel_config.pipeline_parallel_size)
            ]

        tasks = [
            asyncio.create_task(
                _run_task_with_lock(self.driver_exec_method, self.pp_locks[0],
                                    "execute_model", execute_model_req))
        ]
        for pp_rank, driver_worker in enumerate(self.tp_driver_workers,
                                                start=1):
            tasks.append(
                asyncio.create_task(
                    _run_task_with_lock(driver_worker.execute_method.remote,
                                        self.pp_locks[pp_rank],
                                        "execute_model", execute_model_req)))

        results = await asyncio.gather(*tasks)

        # Only the last PP stage has the final results.
        return results[-1]
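
    # For reference, a hedged sketch of what _run_task_with_lock
    # (imported from elsewhere in vLLM) is expected to do: serialize
    # calls to one pipeline stage behind that stage's asyncio.Lock.
    #
    #     async def _run_task_with_lock(task, lock, *args, **kwargs):
    #         async with lock:
    #             return await task(*args, **kwargs)
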
    async def _start_worker_execution_loop(self):
        assert not self.use_ray_spmd_worker, (
            "worker loop is disabled for VLLM_USE_RAY_SPMD_WORKER=1")
        coros = [
            worker.execute_method.remote("start_worker_execution_loop")
            for worker in self.non_driver_workers
        ]
        return await asyncio.gather(*coros)

    def check_health(self) -> None:
        # Assume that the Ray workers are healthy.
        # TODO: check the health of the Ray workers.
        return