vllm-cpu 0.9.2.post2 (cp311-cp311-manylinux_2_17_aarch64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
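Each entry below is of the form `path +added -removed`. Every entry in this diff shows `-0` removed lines, meaning the comparison baseline is empty: the listing enumerates the full contents of this wheel rather than changes against a prior release. As a minimal sketch (not part of the package itself), the snippet below decodes the wheel filename with the `packaging` library's PEP 427 parser and parses one manifest entry; the regex for the entry format is an assumption about this page's layout.

```python
# Sketch: decode the wheel tags and one "+added -removed" manifest entry.
# Requires the third-party `packaging` library (pip install packaging).
import re
from packaging.utils import parse_wheel_filename

name, version, _build, tags = parse_wheel_filename(
    "vllm_cpu-0.9.2.post2-cp311-cp311-manylinux_2_17_aarch64.whl"
)
print(name, version)  # vllm-cpu 0.9.2.post2
for tag in tags:
    # cp311 interpreter, cp311 ABI, manylinux_2_17_aarch64 platform:
    # CPython 3.11 on 64-bit ARM Linux with glibc >= 2.17.
    print(tag.interpreter, tag.abi, tag.platform)

# Assumed entry format for the file listing on this page.
ENTRY = re.compile(r"^(?P<path>\S+) \+(?P<added>\d+) -(?P<removed>\d+)$")
m = ENTRY.match("vllm/_custom_ops.py +1915 -0")
assert m is not None
assert m["removed"] == "0"  # "-0" throughout: every file is new in this diff
```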
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +214 -0
- vllm/_custom_ops.py +1915 -0
- vllm/_ipex_ops.py +350 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +139 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +325 -0
- vllm/attention/backends/blocksparse_attn.py +465 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
- vllm/attention/backends/flash_attn.py +1008 -0
- vllm/attention/backends/flashinfer.py +1107 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +318 -0
- vllm/attention/backends/ipex_attn.py +403 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1391 -0
- vllm/attention/backends/pallas.py +356 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +1015 -0
- vllm/attention/backends/torch_sdpa.py +707 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +807 -0
- vllm/attention/layer.py +481 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +903 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/pallas_kv_cache_update.py +120 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +984 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +738 -0
- vllm/attention/selector.py +214 -0
- vllm/attention/utils/fa_utils.py +72 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1441 -0
- vllm/benchmarks/endpoint_request_func.py +393 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1063 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +610 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +564 -0
- vllm/compilation/counter.py +41 -0
- vllm/compilation/cuda_piecewise_backend.py +218 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +645 -0
- vllm/compilation/fusion_attn.py +166 -0
- vllm/compilation/fx_utils.py +84 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +165 -0
- vllm/compilation/pass_manager.py +82 -0
- vllm/compilation/sequence_parallelism.py +482 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +70 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4913 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +525 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +139 -0
- vllm/core/placeholder_block_space_manager.py +103 -0
- vllm/core/scheduler.py +2126 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +194 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +432 -0
- vllm/distributed/eplb/rebalance_algo.py +234 -0
- vllm/distributed/eplb/rebalance_execute.py +307 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1385 -0
- vllm/distributed/tpu_distributed_utils.py +178 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1801 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2101 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +326 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1278 -0
- vllm/entrypoints/cli/__init__.py +12 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +58 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +36 -0
- vllm/entrypoints/cli/main.py +71 -0
- vllm/entrypoints/cli/openai.py +201 -0
- vllm/entrypoints/cli/run_batch.py +69 -0
- vllm/entrypoints/cli/serve.py +265 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1599 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1495 -0
- vllm/entrypoints/openai/cli_args.py +331 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +2096 -0
- vllm/entrypoints/openai/run_batch.py +473 -0
- vllm/entrypoints/openai/serving_chat.py +1258 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +618 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +988 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +234 -0
- vllm/entrypoints/openai/serving_score.py +431 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +132 -0
- vllm/entrypoints/openai/speech_to_text.py +395 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +262 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +1029 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +185 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +924 -0
- vllm/inputs/registry.py +245 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +81 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +256 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +208 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +420 -0
- vllm/model_executor/layers/fused_moe/__init__.py +78 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
- vllm/model_executor/layers/fused_moe/config.py +456 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
- vllm/model_executor/layers/fused_moe/layer.py +1528 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
- vllm/model_executor/layers/fused_moe/utils.py +144 -0
- vllm/model_executor/layers/layernorm.py +287 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1547 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +473 -0
- vllm/model_executor/layers/quantization/__init__.py +160 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +228 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +164 -0
- vllm/model_executor/layers/quantization/bitblas.py +462 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepgemm.py +83 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +204 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +950 -0
- vllm/model_executor/layers/quantization/gguf.py +577 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +263 -0
- vllm/model_executor/layers/quantization/modelopt.py +747 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +437 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/rtn.py +289 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +212 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +2025 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +116 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +77 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +602 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
- vllm/model_executor/model_loader/tpu.py +113 -0
- vllm/model_executor/model_loader/utils.py +315 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +30 -0
- vllm/model_executor/models/adapters.py +375 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +670 -0
- vllm/model_executor/models/aya_vision.py +486 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +558 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +513 -0
- vllm/model_executor/models/bert_with_rope.py +617 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +728 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1146 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +471 -0
- vllm/model_executor/models/config.py +200 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +281 -0
- vllm/model_executor/models/deepseek_v2.py +935 -0
- vllm/model_executor/models/deepseek_vl2.py +660 -0
- vllm/model_executor/models/dots1.py +536 -0
- vllm/model_executor/models/eagle.py +261 -0
- vllm/model_executor/models/ernie45.py +43 -0
- vllm/model_executor/models/ernie45_moe.py +583 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +708 -0
- vllm/model_executor/models/florence2.py +1113 -0
- vllm/model_executor/models/fuyu.py +406 -0
- vllm/model_executor/models/gemma.py +427 -0
- vllm/model_executor/models/gemma2.py +427 -0
- vllm/model_executor/models/gemma3.py +535 -0
- vllm/model_executor/models/gemma3_mm.py +729 -0
- vllm/model_executor/models/gemma3n.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4_1v.py +1590 -0
- vllm/model_executor/models/glm4v.py +657 -0
- vllm/model_executor/models/gpt2.py +382 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +790 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +653 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +549 -0
- vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +786 -0
- vllm/model_executor/models/interfaces.py +681 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1432 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/keye.py +1736 -0
- vllm/model_executor/models/kimi_vl.py +585 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +531 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +887 -0
- vllm/model_executor/models/llava_next.py +604 -0
- vllm/model_executor/models/llava_next_video.py +492 -0
- vllm/model_executor/models/llava_onevision.py +985 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +320 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +772 -0
- vllm/model_executor/models/minicpmv.py +1307 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +374 -0
- vllm/model_executor/models/mistral3.py +624 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1682 -0
- vllm/model_executor/models/mllama4.py +947 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +339 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1576 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +588 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +577 -0
- vllm/model_executor/models/paligemma.py +419 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +733 -0
- vllm/model_executor/models/phi4mm.py +1258 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +674 -0
- vllm/model_executor/models/pixtral.py +1329 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +501 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
- vllm/model_executor/models/qwen2_5_vl.py +1175 -0
- vllm/model_executor/models/qwen2_audio.py +420 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +122 -0
- vllm/model_executor/models/qwen2_vl.py +1513 -0
- vllm/model_executor/models/qwen3.py +325 -0
- vllm/model_executor/models/qwen3_moe.py +541 -0
- vllm/model_executor/models/qwen_vl.py +796 -0
- vllm/model_executor/models/registry.py +634 -0
- vllm/model_executor/models/roberta.py +271 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +961 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +652 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +509 -0
- vllm/model_executor/models/ultravox.py +670 -0
- vllm/model_executor/models/utils.py +744 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +886 -0
- vllm/model_executor/models/zamba2.py +1036 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +80 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +116 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +91 -0
- vllm/multimodal/image.py +103 -0
- vllm/multimodal/inputs.py +878 -0
- vllm/multimodal/parse.py +499 -0
- vllm/multimodal/processing.py +1948 -0
- vllm/multimodal/profiling.py +283 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +492 -0
- vllm/multimodal/video.py +227 -0
- vllm/outputs.py +516 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +281 -0
- vllm/platforms/cuda.py +568 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +551 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +453 -0
- vllm/platforms/tpu.py +206 -0
- vllm/platforms/xpu.py +192 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +64 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +922 -0
- vllm/transformers_utils/configs/__init__.py +57 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +259 -0
- vllm/transformers_utils/configs/nvlm_d.py +31 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +94 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +259 -0
- vllm/utils/__init__.py +3008 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +184 -0
- vllm/v1/attention/backends/flash_attn.py +757 -0
- vllm/v1/attention/backends/flashinfer.py +680 -0
- vllm/v1/attention/backends/flex_attention.py +491 -0
- vllm/v1/attention/backends/mamba_attn.py +192 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +978 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
- vllm/v1/attention/backends/mla/flashmla.py +180 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
- vllm/v1/attention/backends/mla/triton_mla.py +177 -0
- vllm/v1/attention/backends/pallas.py +320 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
- vllm/v1/attention/backends/triton_attn.py +449 -0
- vllm/v1/attention/backends/utils.py +310 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +254 -0
- vllm/v1/core/kv_cache_coordinator.py +369 -0
- vllm/v1/core/kv_cache_manager.py +398 -0
- vllm/v1/core/kv_cache_utils.py +999 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +157 -0
- vllm/v1/core/sched/request_queue.py +224 -0
- vllm/v1/core/sched/scheduler.py +1115 -0
- vllm/v1/core/sched/utils.py +36 -0
- vllm/v1/core/single_type_kv_cache_manager.py +444 -0
- vllm/v1/engine/__init__.py +179 -0
- vllm/v1/engine/async_llm.py +626 -0
- vllm/v1/engine/coordinator.py +278 -0
- vllm/v1/engine/core.py +1046 -0
- vllm/v1/engine/core_client.py +1049 -0
- vllm/v1/engine/detokenizer.py +292 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +322 -0
- vllm/v1/engine/logprobs.py +200 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +477 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +422 -0
- vllm/v1/engine/utils.py +546 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +532 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +223 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +557 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +240 -0
- vllm/v1/outputs.py +124 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +17 -0
- vllm/v1/request.py +229 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor.py +517 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +43 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +226 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +441 -0
- vllm/v1/spec_decode/medusa.py +64 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +41 -0
- vllm/v1/structured_output/__init__.py +227 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +377 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +91 -0
- vllm/v1/worker/cpu_worker.py +153 -0
- vllm/v1/worker/gpu_input_batch.py +757 -0
- vllm/v1/worker/gpu_model_runner.py +2739 -0
- vllm/v1/worker/gpu_worker.py +408 -0
- vllm/v1/worker/lora_model_runner_mixin.py +177 -0
- vllm/v1/worker/tpu_input_batch.py +585 -0
- vllm/v1/worker/tpu_model_runner.py +1849 -0
- vllm/v1/worker/tpu_worker.py +315 -0
- vllm/v1/worker/utils.py +112 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/v1/worker/xpu_model_runner.py +33 -0
- vllm/v1/worker/xpu_worker.py +165 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +452 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
- vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
- vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
- vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
vllm/v1/engine/core.py
ADDED
|
@@ -0,0 +1,1046 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
import os
|
|
4
|
+
import queue
|
|
5
|
+
import signal
|
|
6
|
+
import sys
|
|
7
|
+
import threading
|
|
8
|
+
import time
|
|
9
|
+
from collections import deque
|
|
10
|
+
from collections.abc import Generator
|
|
11
|
+
from concurrent.futures import Future
|
|
12
|
+
from contextlib import ExitStack, contextmanager
|
|
13
|
+
from inspect import isclass, signature
|
|
14
|
+
from logging import DEBUG
|
|
15
|
+
from typing import Any, Callable, Optional, TypeVar, Union
|
|
16
|
+
|
|
17
|
+
import msgspec
|
|
18
|
+
import zmq
|
|
19
|
+
|
|
20
|
+
from vllm.config import ParallelConfig, VllmConfig
|
|
21
|
+
from vllm.distributed import stateless_destroy_torch_distributed_process_group
|
|
22
|
+
from vllm.executor.multiproc_worker_utils import _add_prefix
|
|
23
|
+
from vllm.logger import init_logger
|
|
24
|
+
from vllm.logging_utils.dump_input import dump_engine_exception
|
|
25
|
+
from vllm.lora.request import LoRARequest
|
|
26
|
+
from vllm.transformers_utils.config import (
|
|
27
|
+
maybe_register_config_serialize_by_value)
|
|
28
|
+
from vllm.utils import make_zmq_socket, resolve_obj_by_qualname
|
|
29
|
+
from vllm.v1.core.kv_cache_utils import (get_kv_cache_config,
|
|
30
|
+
unify_kv_cache_configs)
|
|
31
|
+
from vllm.v1.core.sched.interface import SchedulerInterface
|
|
32
|
+
from vllm.v1.core.sched.output import SchedulerOutput
|
|
33
|
+
from vllm.v1.core.sched.scheduler import Scheduler as V1Scheduler
|
|
34
|
+
from vllm.v1.engine import (EngineCoreOutputs, EngineCoreRequest,
|
|
35
|
+
EngineCoreRequestType, UtilityOutput)
|
|
36
|
+
from vllm.v1.engine.mm_input_cache import MirroredProcessingCache
|
|
37
|
+
from vllm.v1.engine.utils import EngineHandshakeMetadata, EngineZmqAddresses
|
|
38
|
+
from vllm.v1.executor.abstract import Executor
|
|
39
|
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
|
40
|
+
from vllm.v1.metrics.stats import SchedulerStats
|
|
41
|
+
from vllm.v1.outputs import ModelRunnerOutput
|
|
42
|
+
from vllm.v1.request import Request, RequestStatus
|
|
43
|
+
from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder
|
|
44
|
+
from vllm.v1.structured_output import StructuredOutputManager
|
|
45
|
+
from vllm.version import __version__ as VLLM_VERSION
|
|
46
|
+
|
|
47
|
+
logger = init_logger(__name__)
|
|
48
|
+
|
|
49
|
+
POLLING_TIMEOUT_S = 2.5
|
|
50
|
+
HANDSHAKE_TIMEOUT_MINS = 5
|
|
51
|
+
|
|
52
|
+
_R = TypeVar('_R') # Return type for collective_rpc
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class EngineCore:
|
|
56
|
+
"""Inner loop of vLLM's Engine."""
|
|
57
|
+
|
|
58
|
+
def __init__(self,
|
|
59
|
+
vllm_config: VllmConfig,
|
|
60
|
+
executor_class: type[Executor],
|
|
61
|
+
log_stats: bool,
|
|
62
|
+
executor_fail_callback: Optional[Callable] = None):
|
|
63
|
+
|
|
64
|
+
# plugins need to be loaded at the engine/scheduler level too
|
|
65
|
+
from vllm.plugins import load_general_plugins
|
|
66
|
+
load_general_plugins()
|
|
67
|
+
|
|
68
|
+
self.vllm_config = vllm_config
|
|
69
|
+
logger.info("Initializing a V1 LLM engine (v%s) with config: %s",
|
|
70
|
+
VLLM_VERSION, vllm_config)
|
|
71
|
+
|
|
72
|
+
self.log_stats = log_stats
|
|
73
|
+
|
|
74
|
+
# Setup Model.
|
|
75
|
+
self.model_executor = executor_class(vllm_config)
|
|
76
|
+
if executor_fail_callback is not None:
|
|
77
|
+
self.model_executor.register_failure_callback(
|
|
78
|
+
executor_fail_callback)
|
|
79
|
+
|
|
80
|
+
# Setup KV Caches and update CacheConfig after profiling.
|
|
81
|
+
num_gpu_blocks, num_cpu_blocks, kv_cache_config = \
|
|
82
|
+
self._initialize_kv_caches(vllm_config)
|
|
83
|
+
|
|
84
|
+
vllm_config.cache_config.num_gpu_blocks = num_gpu_blocks
|
|
85
|
+
vllm_config.cache_config.num_cpu_blocks = num_cpu_blocks
|
|
86
|
+
self.collective_rpc("initialize_cache",
|
|
87
|
+
args=(num_gpu_blocks, num_cpu_blocks))
|
|
88
|
+
|
|
89
|
+
self.structured_output_manager = StructuredOutputManager(vllm_config)
|
|
90
|
+
|
|
91
|
+
# Setup scheduler.
|
|
92
|
+
if isinstance(vllm_config.scheduler_config.scheduler_cls, str):
|
|
93
|
+
Scheduler = resolve_obj_by_qualname(
|
|
94
|
+
vllm_config.scheduler_config.scheduler_cls)
|
|
95
|
+
else:
|
|
96
|
+
Scheduler = vllm_config.scheduler_config.scheduler_cls
|
|
97
|
+
|
|
98
|
+
# This warning can be removed once the V1 Scheduler interface is
|
|
99
|
+
# finalized and we can maintain support for scheduler classes that
|
|
100
|
+
# implement it
|
|
101
|
+
if Scheduler is not V1Scheduler:
|
|
102
|
+
logger.warning(
|
|
103
|
+
"Using configured V1 scheduler class %s. "
|
|
104
|
+
"This scheduler interface is not public and "
|
|
105
|
+
"compatibility may not be maintained.",
|
|
106
|
+
vllm_config.scheduler_config.scheduler_cls)
|
|
107
|
+
|
|
108
|
+
self.scheduler: SchedulerInterface = Scheduler(
|
|
109
|
+
vllm_config=vllm_config,
|
|
110
|
+
kv_cache_config=kv_cache_config,
|
|
111
|
+
structured_output_manager=self.structured_output_manager,
|
|
112
|
+
include_finished_set=vllm_config.parallel_config.data_parallel_size
|
|
113
|
+
> 1,
|
|
114
|
+
log_stats=self.log_stats,
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
# Setup MM Input Mapper.
|
|
118
|
+
self.mm_input_cache_server = MirroredProcessingCache(
|
|
119
|
+
vllm_config.model_config)
|
|
120
|
+
|
|
121
|
+
# Setup batch queue for pipeline parallelism.
|
|
122
|
+
# Batch queue for scheduled batches. This enables us to asynchronously
|
|
123
|
+
# schedule and execute batches, and is required by pipeline parallelism
|
|
124
|
+
# to eliminate pipeline bubbles.
|
|
125
|
+
self.batch_queue_size = self.model_executor.max_concurrent_batches
|
|
126
|
+
self.batch_queue: Optional[queue.Queue[tuple[Future[ModelRunnerOutput],
|
|
127
|
+
SchedulerOutput]]] = None
|
|
128
|
+
if self.batch_queue_size > 1:
|
|
129
|
+
logger.info("Batch queue is enabled with size %d",
|
|
130
|
+
self.batch_queue_size)
|
|
131
|
+
self.batch_queue = queue.Queue(self.batch_queue_size)
|
|
132
|
+
|
|
133
|
+
def _initialize_kv_caches(
|
|
134
|
+
self, vllm_config: VllmConfig) -> tuple[int, int, KVCacheConfig]:
|
|
135
|
+
start = time.time()
|
|
136
|
+
|
|
137
|
+
# Get all kv cache needed by the model
|
|
138
|
+
kv_cache_specs = self.model_executor.get_kv_cache_specs()
|
|
139
|
+
|
|
140
|
+
# Profiles the peak memory usage of the model to determine how much
|
|
141
|
+
# memory can be allocated for kv cache.
|
|
142
|
+
available_gpu_memory = self.model_executor.determine_available_memory()
|
|
143
|
+
|
|
144
|
+
assert len(kv_cache_specs) == len(available_gpu_memory)
|
|
145
|
+
# Get the kv cache tensor size
|
|
146
|
+
kv_cache_configs = [
|
|
147
|
+
get_kv_cache_config(vllm_config, kv_cache_spec_one_worker,
|
|
148
|
+
available_gpu_memory_one_worker)
|
|
149
|
+
for kv_cache_spec_one_worker, available_gpu_memory_one_worker in
|
|
150
|
+
zip(kv_cache_specs, available_gpu_memory)
|
|
151
|
+
]
|
|
152
|
+
|
|
153
|
+
# Since we use a shared centralized controller, we need the
|
|
154
|
+
# `kv_cache_config` to be consistent across all workers to make sure
|
|
155
|
+
# all the memory operators can be applied to all workers.
|
|
156
|
+
unify_kv_cache_configs(kv_cache_configs)
|
|
157
|
+
|
|
158
|
+
# All workers have the same kv_cache_config except layer names, so use
|
|
159
|
+
# an arbitrary one to initialize the scheduler.
|
|
160
|
+
assert all([
|
|
161
|
+
cfg.num_blocks == kv_cache_configs[0].num_blocks
|
|
162
|
+
for cfg in kv_cache_configs
|
|
163
|
+
])
|
|
164
|
+
num_gpu_blocks = kv_cache_configs[0].num_blocks
|
|
165
|
+
num_cpu_blocks = 0
|
|
166
|
+
scheduler_kv_cache_config = kv_cache_configs[0]
|
|
167
|
+
|
|
168
|
+
# Initialize kv cache and warmup the execution
|
|
169
|
+
self.model_executor.initialize_from_config(kv_cache_configs)
|
|
170
|
+
|
|
171
|
+
elapsed = time.time() - start
|
|
172
|
+
logger.info(("init engine (profile, create kv cache, "
|
|
173
|
+
"warmup model) took %.2f seconds"), elapsed)
|
|
174
|
+
return num_gpu_blocks, num_cpu_blocks, scheduler_kv_cache_config
|
|
175
|
+
|
|
176
|
+
def add_request(self, request: EngineCoreRequest):
|
|
177
|
+
"""Add request to the scheduler."""
|
|
178
|
+
|
|
179
|
+
if request.mm_hashes is not None:
|
|
180
|
+
# Here, if hash exists for a multimodal input, then it will be
|
|
181
|
+
# fetched from the cache, else it will be added to the cache.
|
|
182
|
+
# Note that the cache here is mirrored with the client cache, so
|
|
183
|
+
# anything that has a hash must have a HIT cache entry here
|
|
184
|
+
# as well.
|
|
185
|
+
assert request.mm_inputs is not None
|
|
186
|
+
request.mm_inputs = self.mm_input_cache_server.get_and_update_p1(
|
|
187
|
+
request.mm_inputs, request.mm_hashes)
|
|
188
|
+
|
|
189
|
+
req = Request.from_engine_core_request(request)
|
|
190
|
+
if req.use_structured_output:
|
|
191
|
+
# Start grammar compilation asynchronously
|
|
192
|
+
self.structured_output_manager.grammar_init(req)
|
|
193
|
+
|
|
194
|
+
if req.kv_transfer_params is not None and (
|
|
195
|
+
not self.scheduler.get_kv_connector()):
|
|
196
|
+
logger.warning("Got kv_transfer_params, but no KVConnector found. "
|
|
197
|
+
"Disabling KVTransfer for this request.")
|
|
198
|
+
|
|
199
|
+
self.scheduler.add_request(req)
|
|
200
|
+
|
|
201
|
+
def abort_requests(self, request_ids: list[str]):
|
|
202
|
+
"""Abort requests from the scheduler."""
|
|
203
|
+
|
|
204
|
+
# TODO: The scheduler doesn't really need to know the
|
|
205
|
+
# specific finish reason, TBD whether we propagate that
|
|
206
|
+
# (i.e. client-aborted vs stop criteria met).
|
|
207
|
+
self.scheduler.finish_requests(request_ids,
|
|
208
|
+
RequestStatus.FINISHED_ABORTED)
|
|
209
|
+
|
|
210
|
+
def execute_model(self, scheduler_output: SchedulerOutput):
|
|
211
|
+
try:
|
|
212
|
+
return self.model_executor.execute_model(scheduler_output)
|
|
213
|
+
except Exception as err:
|
|
214
|
+
# We do not want to catch BaseException here since we're only
|
|
215
|
+
# interested in dumping info when the exception is due to an
|
|
216
|
+
# error from execute_model itself.
|
|
217
|
+
|
|
218
|
+
# NOTE: This method is exception-free
|
|
219
|
+
dump_engine_exception(self.vllm_config, scheduler_output,
|
|
220
|
+
self.scheduler.make_stats())
|
|
221
|
+
raise err
|
|
222
|
+
|
|
223
|
+
def step(self) -> tuple[dict[int, EngineCoreOutputs], bool]:
|
|
224
|
+
"""Schedule, execute, and make output.
|
|
225
|
+
|
|
226
|
+
Returns tuple of outputs and a flag indicating whether the model
|
|
227
|
+
was executed.
|
|
228
|
+
"""
|
|
229
|
+
|
|
230
|
+
# Check for any requests remaining in the scheduler - unfinished,
|
|
231
|
+
# or finished and not yet removed from the batch.
|
|
232
|
+
if not self.scheduler.has_requests():
|
|
233
|
+
return {}, False
|
|
234
|
+
scheduler_output = self.scheduler.schedule()
|
|
235
|
+
model_output = self.execute_model(scheduler_output)
|
|
236
|
+
engine_core_outputs = self.scheduler.update_from_output(
|
|
237
|
+
scheduler_output, model_output) # type: ignore
|
|
238
|
+
|
|
239
|
+
return (engine_core_outputs,
|
|
240
|
+
scheduler_output.total_num_scheduled_tokens > 0)
|
|
241
|
+
|
|
242
|
+
def step_with_batch_queue(
|
|
243
|
+
self) -> tuple[Optional[dict[int, EngineCoreOutputs]], bool]:
|
|
244
|
+
"""Schedule and execute batches with the batch queue.
|
|
245
|
+
Note that if nothing to output in this step, None is returned.
|
|
246
|
+
|
|
247
|
+
The execution flow is as follows:
|
|
248
|
+
1. Try to schedule a new batch if the batch queue is not full.
|
|
249
|
+
If a new batch is scheduled, directly return an empty engine core
|
|
250
|
+
output. In other words, fulfilling the batch queue has a higher priority
|
|
251
|
+
than getting model outputs.
|
|
252
|
+
2. If there is no new scheduled batch, meaning that the batch queue
|
|
253
|
+
is full or no other requests can be scheduled, we block until the first
|
|
254
|
+
batch in the job queue is finished.
|
|
255
|
+
3. Update the scheduler from the output.
|
|
256
|
+
"""
|
|
257
|
+
assert self.batch_queue is not None
|
|
258
|
+
|
|
259
|
+
engine_core_outputs = None
|
|
260
|
+
scheduler_output = None
|
|
261
|
+
# Try to schedule a new batch if the batch queue is not full, but
|
|
262
|
+
# the scheduler may return an empty batch if all requests are scheduled.
|
|
263
|
+
# Note that this is not blocking.
|
|
264
|
+
if not self.batch_queue.full():
|
|
265
|
+
scheduler_output = self.scheduler.schedule()
|
|
266
|
+
if scheduler_output.total_num_scheduled_tokens > 0:
|
|
267
|
+
future = self.model_executor.execute_model(scheduler_output)
|
|
268
|
+
self.batch_queue.put_nowait(
|
|
269
|
+
(future, scheduler_output)) # type: ignore
|
|
270
|
+
|
|
271
|
+
scheduled_batch = (scheduler_output is not None
|
|
272
|
+
and scheduler_output.total_num_scheduled_tokens > 0)
|
|
273
|
+
|
|
274
|
+
# If no more requests can be scheduled and the job queue is not empty,
|
|
275
|
+
# block until the first batch in the job queue is finished.
|
|
276
|
+
# TODO(comaniac): Ideally we should peek the first batch in the
|
|
277
|
+
# job queue to check if it's finished before scheduling a new batch,
|
|
278
|
+
# but peeking the first element in a queue is not thread-safe,
|
|
279
|
+
# so we need more work.
|
|
280
|
+
if not scheduled_batch and not self.batch_queue.empty():
|
|
281
|
+
future, scheduler_output = self.batch_queue.get_nowait()
|
|
282
|
+
# Blocking until the first result is available.
|
|
283
|
+
model_output = future.result()
|
|
284
|
+
self.batch_queue.task_done()
|
|
285
|
+
engine_core_outputs = (self.scheduler.update_from_output(
|
|
286
|
+
scheduler_output, model_output))
|
|
287
|
+
|
|
288
|
+
return engine_core_outputs, scheduled_batch
|
|
289
|
+
|
|
290
|
+
def shutdown(self):
|
|
291
|
+
self.structured_output_manager.clear_backend()
|
|
292
|
+
if self.model_executor:
|
|
293
|
+
self.model_executor.shutdown()
|
|
294
|
+
if self.scheduler:
|
|
295
|
+
self.scheduler.shutdown()
|
|
296
|
+
|
|
297
|
+
def profile(self, is_start: bool = True):
|
|
298
|
+
self.model_executor.profile(is_start)
|
|
299
|
+
|
|
300
|
+
def reset_mm_cache(self):
|
|
301
|
+
# NOTE: Since this is mainly for debugging, we don't attempt to
|
|
302
|
+
# re-sync the internal caches (P0 processor, P0 mirror, P1 mirror)
|
|
303
|
+
if self.scheduler.has_unfinished_requests():
|
|
304
|
+
logger.warning("Resetting the multi-modal cache when requests are "
|
|
305
|
+
"in progress may lead to desynced internal caches.")
|
|
306
|
+
|
|
307
|
+
self.mm_input_cache_server.reset()
|
|
308
|
+
|
|
309
|
+
def reset_prefix_cache(self):
|
|
310
|
+
self.scheduler.reset_prefix_cache()
|
|
311
|
+
|
|
312
|
+
def sleep(self, level: int = 1):
|
|
313
|
+
self.model_executor.sleep(level)
|
|
314
|
+
|
|
315
|
+
def wake_up(self, tags: Optional[list[str]] = None):
|
|
316
|
+
self.model_executor.wake_up(tags)
|
|
317
|
+
|
|
318
|
+
def is_sleeping(self) -> bool:
|
|
319
|
+
return self.model_executor.is_sleeping
|
|
320
|
+
|
|
321
|
+
def execute_dummy_batch(self):
|
|
322
|
+
self.model_executor.collective_rpc("execute_dummy_batch")
|
|
323
|
+
|
|
324
|
+
def add_lora(self, lora_request: LoRARequest) -> bool:
|
|
325
|
+
return self.model_executor.add_lora(lora_request)
|
|
326
|
+
|
|
327
|
+
def remove_lora(self, lora_id: int) -> bool:
|
|
328
|
+
return self.model_executor.remove_lora(lora_id)
|
|
329
|
+
|
|
330
|
+
def list_loras(self) -> set[int]:
|
|
331
|
+
return self.model_executor.list_loras()
|
|
332
|
+
|
|
333
|
+
def pin_lora(self, lora_id: int) -> bool:
|
|
334
|
+
return self.model_executor.pin_lora(lora_id)
|
|
335
|
+
|
|
336
|
+
def save_sharded_state(
|
|
337
|
+
self,
|
|
338
|
+
path: str,
|
|
339
|
+
pattern: Optional[str] = None,
|
|
340
|
+
max_size: Optional[int] = None,
|
|
341
|
+
) -> None:
|
|
342
|
+
self.model_executor.save_sharded_state(path=path,
|
|
343
|
+
pattern=pattern,
|
|
344
|
+
max_size=max_size)
|
|
345
|
+
|
|
346
|
+
def collective_rpc(self,
|
|
347
|
+
method: Union[str, Callable[..., _R]],
|
|
348
|
+
timeout: Optional[float] = None,
|
|
349
|
+
args: tuple = (),
|
|
350
|
+
kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
|
|
351
|
+
return self.model_executor.collective_rpc(method, timeout, args,
|
|
352
|
+
kwargs)
|
|
353
|
+
|
|
354
|
+
def save_tensorized_model(
|
|
355
|
+
self,
|
|
356
|
+
tensorizer_config,
|
|
357
|
+
) -> None:
|
|
358
|
+
self.model_executor.save_tensorized_model(
|
|
359
|
+
tensorizer_config=tensorizer_config, )
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
class EngineCoreProc(EngineCore):
|
|
363
|
+
"""ZMQ-wrapper for running EngineCore in background process."""
|
|
364
|
+
|
|
365
|
+
ENGINE_CORE_DEAD = b'ENGINE_CORE_DEAD'
|
|
366
|
+
|
|
367
|
+
def __init__(
|
|
368
|
+
self,
|
|
369
|
+
vllm_config: VllmConfig,
|
|
370
|
+
local_client: bool,
|
|
371
|
+
handshake_address: str,
|
|
372
|
+
executor_class: type[Executor],
|
|
373
|
+
log_stats: bool,
|
|
374
|
+
client_handshake_address: Optional[str] = None,
|
|
375
|
+
engine_index: int = 0,
|
|
376
|
+
):
|
|
377
|
+
self.input_queue = queue.Queue[tuple[EngineCoreRequestType, Any]]()
|
|
378
|
+
self.output_queue = queue.Queue[Union[tuple[int, EngineCoreOutputs],
|
|
379
|
+
bytes]]()
|
|
380
|
+
executor_fail_callback = lambda: self.input_queue.put_nowait(
|
|
381
|
+
(EngineCoreRequestType.EXECUTOR_FAILED, b''))
|
|
382
|
+
|
|
383
|
+
self.engine_index = engine_index
|
|
384
|
+
identity = self.engine_index.to_bytes(length=2, byteorder="little")
|
|
385
|
+
self.engines_running = False
|
|
386
|
+
|
|
387
|
+
with self._perform_handshakes(handshake_address, identity,
|
|
388
|
+
local_client, vllm_config,
|
|
389
|
+
client_handshake_address) as addresses:
|
|
390
|
+
self.client_count = len(addresses.outputs)
|
|
391
|
+
|
|
392
|
+
# Set up data parallel environment.
|
|
393
|
+
self.has_coordinator = addresses.coordinator_output is not None
|
|
394
|
+
self.frontend_stats_publish_address = (
|
|
395
|
+
addresses.frontend_stats_publish_address)
|
|
396
|
+
# Only publish request queue stats to coordinator for "internal"
|
|
397
|
+
# LB mode.
|
|
398
|
+
self.publish_dp_lb_stats = (
|
|
399
|
+
self.has_coordinator
|
|
400
|
+
and not vllm_config.parallel_config.data_parallel_external_lb)
|
|
401
|
+
|
|
402
|
+
self._init_data_parallel(vllm_config)
|
|
403
|
+
|
|
404
|
+
super().__init__(vllm_config, executor_class, log_stats,
|
|
405
|
+
executor_fail_callback)
|
|
406
|
+
|
|
407
|
+
self.step_fn = (self.step if self.batch_queue is None else
|
|
408
|
+
self.step_with_batch_queue)
|
|
409
|
+
|
|
410
|
+
# Background Threads and Queues for IO. These enable us to
|
|
411
|
+
# overlap ZMQ socket IO with GPU since they release the GIL,
|
|
412
|
+
# and to overlap some serialization/deserialization with the
|
|
413
|
+
# model forward pass.
|
|
414
|
+
# Threads handle Socket <-> Queues and core_busy_loop uses Queue.
|
|
415
|
+
threading.Thread(target=self.process_input_sockets,
|
|
416
|
+
args=(addresses.inputs, addresses.coordinator_input,
|
|
417
|
+
identity),
|
|
418
|
+
daemon=True).start()
|
|
419
|
+
self.output_thread = threading.Thread(
|
|
420
|
+
target=self.process_output_sockets,
|
|
421
|
+
args=(addresses.outputs, addresses.coordinator_output,
|
|
422
|
+
self.engine_index),
|
|
423
|
+
daemon=True)
|
|
424
|
+
self.output_thread.start()
|
|
425
|
+
|
|
426
|
+
    @contextmanager
    def _perform_handshakes(
        self,
        handshake_address: str,
        identity: bytes,
        local_client: bool,
        vllm_config: VllmConfig,
        client_handshake_address: Optional[str],
    ) -> Generator[EngineZmqAddresses, None, None]:
        """
        Perform startup handshakes.

        For DP=1 or offline mode, this is with the colocated front-end
        process.

        For DP>1 with internal load balancing, this is with the shared
        front-end process, which may reside on a different node.

        For DP>1 with external load balancing, two handshakes are performed:
            - With the rank 0 front-end process, which retrieves the
              DP Coordinator ZMQ addresses and DP process group address.
            - With the colocated front-end process, which retrieves the
              client input/output socket addresses.
        The exception is the rank 0 engine itself, which doesn't require
        the second handshake.

        Here, the "front-end" process can mean the process containing the
        engine core client (which is the API server process in the case
        that the API server is not scaled out), OR the launcher process
        running the run_multi_api_server() function in serve.py.
        """
        input_ctx = zmq.Context()
        is_local = local_client and client_handshake_address is None
        handshake = self._perform_handshake(input_ctx, handshake_address,
                                            identity, is_local, vllm_config,
                                            vllm_config.parallel_config)
        if client_handshake_address is None:
            with handshake as addresses:
                yield addresses
        else:
            local_handshake = self._perform_handshake(
                input_ctx, client_handshake_address, identity, local_client,
                vllm_config)
            with handshake as addresses, local_handshake as client_addresses:
                addresses.inputs = client_addresses.inputs
                addresses.outputs = client_addresses.outputs
                yield addresses

        # Update the config, which may have changed from the handshake.
        vllm_config.__post_init__()
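
Because _perform_handshakes is a @contextmanager, the vllm_config.__post_init__() call after the yield only runs once the caller's with block exits. A minimal sketch of that ordering (hypothetical names, standard library only):

from contextlib import contextmanager

@contextmanager
def perform_handshakes():
    print("1) handshake performed")           # runs on `with` entry
    yield {"inputs": [], "outputs": []}       # caller's block runs here
    print("3) post-handshake config update")  # runs on `with` exit

with perform_handshakes() as addresses:
    print("2) engine initialization using", addresses)
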
    @contextmanager
    def _perform_handshake(
        self,
        ctx: zmq.Context,
        handshake_address: str,
        identity: bytes,
        local_client: bool,
        vllm_config: VllmConfig,
        parallel_config_to_update: Optional[ParallelConfig] = None,
    ) -> Generator[EngineZmqAddresses, None, None]:
        with make_zmq_socket(ctx,
                             handshake_address,
                             zmq.DEALER,
                             identity=identity,
                             linger=5000,
                             bind=False) as handshake_socket:
            # Register engine with front-end.
            addresses = self.startup_handshake(handshake_socket, local_client,
                                               parallel_config_to_update)
            yield addresses

            # Send ready message.
            num_gpu_blocks = vllm_config.cache_config.num_gpu_blocks
            # We pass back the coordinator stats update address here for the
            # external LB case for our colocated front-end to use (coordinator
            # only runs with rank 0).
            dp_stats_address = self.frontend_stats_publish_address
            handshake_socket.send(
                msgspec.msgpack.encode({
                    "status": "READY",
                    "local": local_client,
                    "num_gpu_blocks": num_gpu_blocks,
                    "dp_stats_address": dp_stats_address,
                }))
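
The READY message is a plain dict serialized with msgspec's msgpack codec. A hedged round-trip sketch with made-up values:

import msgspec

ready = {
    "status": "READY",
    "local": True,
    "num_gpu_blocks": 1024,  # illustrative value
    "dp_stats_address": None,
}
encoded = msgspec.msgpack.encode(ready)  # bytes on the wire
assert msgspec.msgpack.decode(encoded) == ready
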
    @staticmethod
    def startup_handshake(
        handshake_socket: zmq.Socket,
        local_client: bool,
        parallel_config: Optional[ParallelConfig] = None,
    ) -> EngineZmqAddresses:

        # Send registration message.
        handshake_socket.send(
            msgspec.msgpack.encode({
                "status": "HELLO",
                "local": local_client,
            }))

        # Receive initialization message.
        logger.info("Waiting for init message from front-end.")
        if not handshake_socket.poll(timeout=HANDSHAKE_TIMEOUT_MINS * 60_000):
            raise RuntimeError("Did not receive response from front-end "
                               f"process within {HANDSHAKE_TIMEOUT_MINS} "
                               "minutes")
        init_bytes = handshake_socket.recv()
        init_message: EngineHandshakeMetadata = msgspec.msgpack.decode(
            init_bytes, type=EngineHandshakeMetadata)
        logger.debug("Received init message: %s", init_message)

        if parallel_config is not None:
            for key, value in init_message.parallel_config.items():
                setattr(parallel_config, key, value)

        return init_message.addresses
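
On the other side of this handshake sits a ROUTER socket. An illustrative pyzmq sketch of that pattern (an assumption about the front-end shape, not vllm's actual code): the ROUTER sees the DEALER's identity as the first frame and must echo it back to route the reply.

import zmq

ctx = zmq.Context()
router = ctx.socket(zmq.ROUTER)
router.bind("tcp://127.0.0.1:5555")

dealer = ctx.socket(zmq.DEALER)
dealer.setsockopt(zmq.IDENTITY, (0).to_bytes(2, "little"))
dealer.connect("tcp://127.0.0.1:5555")

dealer.send(b"HELLO")
identity, payload = router.recv_multipart()  # [dealer identity, message]
router.send_multipart([identity, b"INIT"])   # reply routed by identity
assert dealer.recv() == b"INIT"
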
    @staticmethod
    def run_engine_core(*args,
                        dp_rank: int = 0,
                        local_dp_rank: int = 0,
                        **kwargs):
        """Launch EngineCore busy loop in background process."""

        # Signal handler used for graceful termination.
        # SystemExit exception is only raised once to allow this and worker
        # processes to terminate without error.
        shutdown_requested = False

        # Ensure we can serialize transformer config after spawning.
        maybe_register_config_serialize_by_value()

        def signal_handler(signum, frame):
            nonlocal shutdown_requested
            if not shutdown_requested:
                shutdown_requested = True
                raise SystemExit()

        # Either SIGTERM or SIGINT will terminate the engine_core.
        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)

        engine_core: Optional[EngineCoreProc] = None
        try:
            parallel_config: ParallelConfig = kwargs[
                "vllm_config"].parallel_config
            if parallel_config.data_parallel_size > 1 or dp_rank > 0:
                # Set data parallel rank for this engine process.
                parallel_config.data_parallel_rank = dp_rank
                parallel_config.data_parallel_rank_local = local_dp_rank
                engine_core = DPEngineCoreProc(*args, **kwargs)
            else:
                engine_core = EngineCoreProc(*args, **kwargs)

            engine_core.run_busy_loop()

        except SystemExit:
            logger.debug("EngineCore exiting.")
            raise
        except Exception as e:
            if engine_core is None:
                logger.exception("EngineCore failed to start.")
            else:
                logger.exception("EngineCore encountered a fatal error.")
                engine_core._send_engine_dead()
            raise e
        finally:
            if engine_core is not None:
                engine_core.shutdown()
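
The signal handling above converts the first SIGTERM/SIGINT into a SystemExit so finally blocks can run, and ignores repeats. A minimal standalone sketch of the same pattern:

import signal

shutdown_requested = False

def signal_handler(signum, frame):
    global shutdown_requested
    if not shutdown_requested:
        shutdown_requested = True
        raise SystemExit()  # unwinds into try/finally cleanup

signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
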
    def _init_data_parallel(self, vllm_config: VllmConfig):
        pass

    def run_busy_loop(self):
        """Core busy loop of the EngineCore."""

        # Loop until the process is sent a SIGINT or SIGTERM.
        while True:
            # 1) Poll the input queue until there is work to do.
            self._process_input_queue()
            # 2) Step the engine core and return the outputs.
            self._process_engine_step()
    def _process_input_queue(self):
        """Exits when an engine step needs to be performed."""

        waited = False
        while not self.engines_running and not self.scheduler.has_requests():
            if logger.isEnabledFor(DEBUG) and self.input_queue.empty():
                logger.debug("EngineCore waiting for work.")
                waited = True
            req = self.input_queue.get()
            self._handle_client_request(*req)

        if waited:
            logger.debug("EngineCore loop active.")

        # Handle any more client requests.
        while not self.input_queue.empty():
            req = self.input_queue.get_nowait()
            self._handle_client_request(*req)
    def _process_engine_step(self) -> bool:
        """Called only when there are unfinished local requests."""

        # Step the engine core.
        outputs, model_executed = self.step_fn()
        # Put EngineCoreOutputs into the output queue.
        for output in (outputs.items() if outputs else ()):
            self.output_queue.put_nowait(output)

        return model_executed
    def _handle_client_request(self, request_type: EngineCoreRequestType,
                               request: Any) -> None:
        """Dispatch request from client."""

        if request_type == EngineCoreRequestType.ADD:
            self.add_request(request)
        elif request_type == EngineCoreRequestType.ABORT:
            self.abort_requests(request)
        elif request_type == EngineCoreRequestType.UTILITY:
            client_idx, call_id, method_name, args = request
            output = UtilityOutput(call_id)
            try:
                method = getattr(self, method_name)
                output.result = method(
                    *self._convert_msgspec_args(method, args))
            except BaseException as e:
                logger.exception("Invocation of %s method failed", method_name)
                output.failure_message = (f"Call to {method_name} method"
                                          f" failed: {str(e)}")
            self.output_queue.put_nowait(
                (client_idx, EngineCoreOutputs(utility_output=output)))
        elif request_type == EngineCoreRequestType.EXECUTOR_FAILED:
            raise RuntimeError("Executor failed.")
        else:
            logger.error("Unrecognized input request type encountered: %s",
                         request_type)
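
The UTILITY branch is effectively name-based RPC: the request carries a method name plus args, resolved with getattr. A simplified sketch (Engine here is hypothetical, and the real request tuple also carries a client index):

class Engine:
    def ping(self, x: int) -> int:
        return x + 1

engine = Engine()
call_id, method_name, args = 7, "ping", (41,)
result = getattr(engine, method_name)(*args)  # dispatch by name
assert result == 42
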
    @staticmethod
    def _convert_msgspec_args(method, args):
        """If a provided arg type doesn't match the corresponding target
        method arg type, try converting it to a msgspec object."""
        if not args:
            return args
        arg_types = signature(method).parameters.values()
        assert len(args) <= len(arg_types)
        return tuple(
            msgspec.convert(v, type=p.annotation) if isclass(p.annotation)
            and issubclass(p.annotation, msgspec.Struct)
            and not isinstance(v, p.annotation) else v
            for v, p in zip(args, arg_types))
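
A hedged sketch of what that conversion does: an argument annotated as a msgspec.Struct may arrive as a plain dict from generic msgpack decoding and is rebuilt with msgspec.convert (Point is hypothetical):

import msgspec

class Point(msgspec.Struct):
    x: int
    y: int

decoded_arg = {"x": 1, "y": 2}  # what generic decoding yields
point = msgspec.convert(decoded_arg, type=Point)
assert point == Point(x=1, y=2)
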
    def _send_engine_dead(self):
        """Send EngineDead status to the EngineCoreClient."""

        # Put ENGINE_CORE_DEAD in the queue.
        self.output_queue.put_nowait(EngineCoreProc.ENGINE_CORE_DEAD)

        # Wait until the message is sent by the daemon thread before
        # shutting down.
        self.output_thread.join(timeout=5.0)
        if self.output_thread.is_alive():
            logger.fatal("vLLM shutdown signal from EngineCore failed "
                         "to send. Please report this issue.")
    def process_input_sockets(self, input_addresses: list[str],
                              coord_input_address: Optional[str],
                              identity: bytes):
        """Input socket IO thread."""

        # Msgpack serialization decoding.
        add_request_decoder = MsgpackDecoder(EngineCoreRequest)
        generic_decoder = MsgpackDecoder()

        with ExitStack() as stack, zmq.Context() as ctx:
            input_sockets = [
                stack.enter_context(
                    make_zmq_socket(ctx,
                                    input_address,
                                    zmq.DEALER,
                                    identity=identity,
                                    bind=False))
                for input_address in input_addresses
            ]
            if coord_input_address is None:
                coord_socket = None
            else:
                coord_socket = stack.enter_context(
                    make_zmq_socket(ctx,
                                    coord_input_address,
                                    zmq.XSUB,
                                    identity=identity,
                                    bind=False))
                # Send subscription message to coordinator.
                coord_socket.send(b'\x01')

            # Register sockets with poller.
            poller = zmq.Poller()
            for input_socket in input_sockets:
                # Send initial message to each input socket - this is required
                # before the front-end ROUTER socket can send input messages
                # back to us.
                input_socket.send(b'')
                poller.register(input_socket, zmq.POLLIN)
            if coord_socket is not None:
                poller.register(coord_socket, zmq.POLLIN)

            while True:
                for input_socket, _ in poller.poll():
                    # (RequestType, RequestData)
                    type_frame, *data_frames = input_socket.recv_multipart(
                        copy=False)
                    request_type = EngineCoreRequestType(
                        bytes(type_frame.buffer))

                    # Deserialize the request data.
                    decoder = add_request_decoder if (
                        request_type
                        == EngineCoreRequestType.ADD) else generic_decoder
                    request = decoder.decode(data_frames)

                    # Push to input queue for core busy loop.
                    self.input_queue.put_nowait((request_type, request))
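
The b'\x01' frame above is the raw ZMQ subscription wire format: a 0x01 prefix means subscribe, and an empty remainder means all topics. An illustrative XSUB/XPUB sketch (a plain XPUB stand-in, not vllm's coordinator):

import zmq

ctx = zmq.Context()
xpub = ctx.socket(zmq.XPUB)
xpub.bind("inproc://coord")
xsub = ctx.socket(zmq.XSUB)
xsub.connect("inproc://coord")

xsub.send(b"\x01")             # 0x01 = subscribe, empty topic = everything
assert xpub.recv() == b"\x01"  # XPUB surfaces subscriptions as messages

xpub.send(b"stats-update")
assert xsub.recv() == b"stats-update"
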
    def process_output_sockets(self, output_paths: list[str],
                               coord_output_path: Optional[str],
                               engine_index: int):
        """Output socket IO thread."""

        # Msgpack serialization encoding.
        encoder = MsgpackEncoder()
        # Send buffers to reuse.
        reuse_buffers: list[bytearray] = []
        # Keep references to outputs and buffers until zmq is finished
        # with them (outputs may contain tensors/np arrays whose
        # backing buffers were extracted for zero-copy send).
        pending = deque[tuple[zmq.MessageTracker, Any, bytearray]]()

        # We must set linger to ensure the ENGINE_CORE_DEAD
        # message is sent prior to closing the socket.
        with ExitStack() as stack, zmq.Context() as ctx:
            sockets = [
                stack.enter_context(
                    make_zmq_socket(ctx, output_path, zmq.PUSH, linger=4000))
                for output_path in output_paths
            ]
            coord_socket = stack.enter_context(
                make_zmq_socket(
                    ctx, coord_output_path, zmq.PUSH, bind=False,
                    linger=4000)) if coord_output_path is not None else None
            max_reuse_bufs = len(sockets) + 1

            while True:
                output = self.output_queue.get()
                if output == EngineCoreProc.ENGINE_CORE_DEAD:
                    for socket in sockets:
                        socket.send(output)
                    break
                assert not isinstance(output, bytes)
                client_index, outputs = output
                outputs.engine_index = engine_index

                if client_index == -1:
                    # Don't reuse buffer for coordinator message
                    # which will be very small.
                    assert coord_socket is not None
                    coord_socket.send_multipart(encoder.encode(outputs))
                    continue

                # Reclaim buffers that zmq is finished with.
                while pending and pending[-1][0].done:
                    reuse_buffers.append(pending.pop()[2])

                buffer = reuse_buffers.pop() if reuse_buffers else bytearray()
                buffers = encoder.encode_into(outputs, buffer)
                tracker = sockets[client_index].send_multipart(buffers,
                                                               copy=False,
                                                               track=True)
                if not tracker.done:
                    ref = outputs if len(buffers) > 1 else None
                    pending.appendleft((tracker, ref, buffer))
                elif len(reuse_buffers) < max_reuse_bufs:
                    # Limit the number of buffers to reuse.
                    reuse_buffers.append(buffer)

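The pending/reuse_buffers bookkeeping exists because copy=False hands the buffer to zmq asynchronously. A minimal pyzmq sketch of the tracked zero-copy send it relies on:

import zmq

ctx = zmq.Context()
push = ctx.socket(zmq.PUSH)
push.bind("inproc://out")
pull = ctx.socket(zmq.PULL)
pull.connect("inproc://out")

buffer = bytearray(b"engine outputs")
tracker = push.send(buffer, copy=False, track=True)
assert pull.recv() == b"engine outputs"
tracker.wait()       # blocks until zmq releases the buffer
assert tracker.done  # only now is it safe to reuse `buffer`
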
class DPEngineCoreProc(EngineCoreProc):
    """ZMQ-wrapper for running EngineCore in background process
    in a data parallel context."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        handshake_address: str,
        executor_class: type[Executor],
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
    ):
        self._decorate_logs()

        # Counts forward passes of the model so that we can synchronize
        # the finished state with DP peers every N steps.
        self.counter = 0
        self.current_wave = 0
        self.last_counts = (0, 0)

        # Initialize the engine.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        super().__init__(vllm_config, local_client, handshake_address,
                         executor_class, log_stats, client_handshake_address,
                         dp_rank)
    def _decorate_logs(self):
        # Add process-specific prefix to stdout and stderr before
        # we initialize the engine.
        from multiprocessing import current_process
        process_name = current_process().name
        pid = os.getpid()
        _add_prefix(sys.stdout, process_name, pid)
        _add_prefix(sys.stderr, process_name, pid)
    def _init_data_parallel(self, vllm_config: VllmConfig):

        # Configure GPUs and stateless process group for data parallel.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        dp_size = vllm_config.parallel_config.data_parallel_size
        local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local

        assert dp_size > 1
        assert 0 <= local_dp_rank <= dp_rank < dp_size

        if vllm_config.kv_transfer_config is not None:
            # Modify the engine_id, appending the local_dp_rank to ensure
            # that the kv_transfer_config is unique for each DP rank.
            vllm_config.kv_transfer_config.engine_id = (
                f"{vllm_config.kv_transfer_config.engine_id}_dp{local_dp_rank}"
            )
            logger.debug("Setting kv_transfer_config.engine_id to %s",
                         vllm_config.kv_transfer_config.engine_id)

        from vllm.platforms import current_platform
        device_control_env_var = current_platform.device_control_env_var
        world_size = vllm_config.parallel_config.world_size
        # Set CUDA_VISIBLE_DEVICES or equivalent.
        try:
            os.environ[device_control_env_var] = ",".join(
                str(current_platform.device_id_to_physical_device_id(i))
                for i in range(local_dp_rank *
                               world_size, (local_dp_rank + 1) * world_size))
        except IndexError as e:
            raise Exception(
                f"Error setting {device_control_env_var}: "
                f"local range: [{local_dp_rank * world_size}, "
                f"{(local_dp_rank + 1) * world_size}) "
                f"base value: \"{os.getenv(device_control_env_var)}\"") from e

        self.dp_rank = dp_rank
        self.dp_group = vllm_config.parallel_config.stateless_init_dp_group()
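
The try block above assigns each local DP rank a contiguous slice of devices. A worked example of the arithmetic with made-up values:

world_size = 2      # devices per engine (e.g. TP * PP)
local_dp_rank = 1
devices = list(range(local_dp_rank * world_size,
                     (local_dp_rank + 1) * world_size))
assert devices == [2, 3]  # e.g. CUDA_VISIBLE_DEVICES="2,3"
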
    def shutdown(self):
        super().shutdown()
        if dp_group := getattr(self, "dp_group", None):
            stateless_destroy_torch_distributed_process_group(dp_group)
    def add_request(self, request: EngineCoreRequest):
        if self.has_coordinator and request.current_wave != self.current_wave:
            if request.current_wave > self.current_wave:
                self.current_wave = request.current_wave
            elif not self.engines_running:
                # Request received for an already-completed wave, notify
                # front-end that we need to start the next one.
                self.output_queue.put_nowait(
                    (-1, EngineCoreOutputs(start_wave=self.current_wave)))

        super().add_request(request)
    def _handle_client_request(self, request_type: EngineCoreRequestType,
                               request: Any) -> None:
        if request_type == EngineCoreRequestType.START_DP_WAVE:
            new_wave, exclude_eng_index = request
            if exclude_eng_index != self.engine_index and (
                    new_wave >= self.current_wave):
                self.current_wave = new_wave
                if not self.engines_running:
                    logger.debug("EngineCore starting idle loop for wave %d.",
                                 new_wave)
                    self.engines_running = True
        else:
            super()._handle_client_request(request_type, request)
    def _maybe_publish_request_counts(self):
        if not self.publish_dp_lb_stats:
            return

        # Publish our request counts (if they've changed).
        counts = self.scheduler.get_request_counts()
        if counts != self.last_counts:
            self.last_counts = counts
            stats = SchedulerStats(*counts)
            self.output_queue.put_nowait(
                (-1, EngineCoreOutputs(scheduler_stats=stats)))
    def run_busy_loop(self):
        """Core busy loop of the EngineCore for the data parallel case."""

        # Loop until the process is sent a SIGINT or SIGTERM.
        while True:
            # 1) Poll the input queue until there is work to do.
            self._process_input_queue()

            # 2) Step the engine core.
            executed = self._process_engine_step()
            self._maybe_publish_request_counts()

            local_unfinished_reqs = self.scheduler.has_unfinished_requests()
            if not executed:
                if not local_unfinished_reqs and not self.engines_running:
                    # All engines are idle.
                    continue

                # We are in a running state and so must execute a dummy pass
                # if the model didn't execute any ready requests.
                self.execute_dummy_batch()

            # 3) All-reduce operation to determine global unfinished reqs.
            self.engines_running = self._has_global_unfinished_reqs(
                local_unfinished_reqs)

            if not self.engines_running:
                if self.dp_rank == 0 or not self.has_coordinator:
                    # Notify client that we are pausing the loop.
                    logger.debug("Wave %d finished, pausing engine loop.",
                                 self.current_wave)
                    # In the coordinator case, dp rank 0 sends updates to the
                    # coordinator. Otherwise (offline spmd case), each rank
                    # sends the update to its colocated front-end process.
                    client_index = -1 if self.has_coordinator else 0
                    self.output_queue.put_nowait(
                        (client_index,
                         EngineCoreOutputs(wave_complete=self.current_wave)))
                self.current_wave += 1
    def _has_global_unfinished_reqs(self, local_unfinished: bool) -> bool:

        # Optimization - only perform finish-sync all-reduce every 32 steps.
        self.counter += 1
        if self.counter != 32:
            return True
        self.counter = 0

        return ParallelConfig.has_unfinished_dp(self.dp_group,
                                                local_unfinished)

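A minimal sketch of that every-N-steps gating, with the all-reduce stubbed out: between syncs the engine simply assumes work remains, trading a little extra idling for 32x fewer collective operations.

SYNC_EVERY = 32
counter = 0

def has_global_unfinished(local_unfinished: bool, sync) -> bool:
    global counter
    counter += 1
    if counter != SYNC_EVERY:
        return True  # assume still running; skip the collective
    counter = 0
    return sync(local_unfinished)

# trivial single-process stand-in for the DP all-reduce:
assert has_global_unfinished(False, sync=lambda x: x) is True
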
class DPEngineCoreActor(DPEngineCoreProc):
    """
    Ray actor for running EngineCore in a data parallel context.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        addresses: EngineZmqAddresses,
        executor_class: type[Executor],
        log_stats: bool,
        dp_rank: int = 0,
        local_dp_rank: int = 0,
    ):
        self.addresses = addresses
        vllm_config.parallel_config.data_parallel_rank = dp_rank
        vllm_config.parallel_config.data_parallel_rank_local = \
            local_dp_rank

        # Ray sets CUDA_VISIBLE_DEVICES to an empty string; we clean this
        # up to be able to properly initialize data parallel groups.
        del os.environ['CUDA_VISIBLE_DEVICES']

        super().__init__(vllm_config, local_client, "", executor_class,
                         log_stats)

    def _decorate_logs(self):
        pass

    @contextmanager
    def _perform_handshakes(self, handshake_address: str, identity: bytes,
                            local_client: bool, vllm_config: VllmConfig,
                            client_handshake_address: Optional[str]):
        """
        For Ray, we don't need to actually perform a handshake: all
        address information is known before actor creation, so we
        simply yield these addresses.
        """
        yield self.addresses

    def wait_for_init(self):
        """
        Wait until the engine core is initialized.

        This is just an empty method. When ray.get() on this method
        (or any other method of the actor) returns, it is guaranteed
        that actor creation (i.e., __init__) is complete.
        """
        pass

    def run(self):
        """
        Run the engine core busy loop.
        """
        try:
            self.run_busy_loop()
        except SystemExit:
            logger.debug("EngineCore exiting.")
            raise
        except Exception:
            logger.exception("EngineCore encountered a fatal error.")
            raise
        finally:
            self.shutdown()