vllm-cpu 0.11.0.post2 (vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
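For context, a minimal sketch of how a listing like the one below could be reproduced from a locally downloaded copy of the wheel, using only the Python standard library. This is illustrative only and not part of the package; the wheel filename and the line-counting convention (binary entries such as `.so` or `.jpg` shown as `+0`) are assumptions based on the header and the listing format.

```python
# Illustrative sketch: enumerate a wheel's contents and approximate the
# "+lines -0" stats shown in this diff (a wheel is a plain zip archive).
import zipfile

# Assumed local path, derived from the header above.
WHEEL = "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        data = wheel.read(info.filename)
        try:
            # Text files: count newlines as added lines.
            lines = data.decode("utf-8").count("\n")
        except UnicodeDecodeError:
            # Binary files (e.g. .so, .jpg) are reported as +0 -0.
            lines = 0
        print(f"- {info.filename} +{lines} -0")
```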
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +220 -0
- vllm/_bc_linter.py +59 -0
- vllm/_custom_ops.py +2044 -0
- vllm/_ipex_ops.py +393 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +50 -0
- vllm/assets/video.py +145 -0
- vllm/attention/__init__.py +15 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +204 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +645 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +93 -0
- vllm/attention/layers/cross_attention.py +162 -0
- vllm/attention/layers/encoder_only_attention.py +86 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
- vllm/attention/ops/common.py +345 -0
- vllm/attention/ops/flashmla.py +192 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/paged_attn.py +262 -0
- vllm/attention/ops/pallas_kv_cache_update.py +124 -0
- vllm/attention/ops/prefix_prefill.py +928 -0
- vllm/attention/ops/rocm_aiter_mla.py +104 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +691 -0
- vllm/attention/ops/triton_flash_attention.py +984 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
- vllm/attention/ops/triton_unified_attention.py +894 -0
- vllm/attention/selector.py +245 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +85 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +2723 -0
- vllm/benchmarks/latency.py +170 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +533 -0
- vllm/benchmarks/lib/ready_checker.py +73 -0
- vllm/benchmarks/lib/utils.py +80 -0
- vllm/benchmarks/serve.py +1358 -0
- vllm/benchmarks/throughput.py +696 -0
- vllm/collect_env.py +823 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +189 -0
- vllm/compilation/backends.py +650 -0
- vllm/compilation/base_static_graph.py +56 -0
- vllm/compilation/collective_fusion.py +1188 -0
- vllm/compilation/compiler_interface.py +573 -0
- vllm/compilation/counter.py +47 -0
- vllm/compilation/cuda_graph.py +199 -0
- vllm/compilation/cuda_piecewise_backend.py +117 -0
- vllm/compilation/decorators.py +400 -0
- vllm/compilation/fix_functionalization.py +205 -0
- vllm/compilation/fusion.py +383 -0
- vllm/compilation/fusion_attn.py +295 -0
- vllm/compilation/fx_utils.py +84 -0
- vllm/compilation/inductor_pass.py +136 -0
- vllm/compilation/monitor.py +57 -0
- vllm/compilation/noop_elimination.py +158 -0
- vllm/compilation/pass_manager.py +125 -0
- vllm/compilation/post_cleanup.py +20 -0
- vllm/compilation/sequence_parallelism.py +478 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +156 -0
- vllm/compilation/wrapper.py +136 -0
- vllm/config/__init__.py +814 -0
- vllm/config/cache.py +220 -0
- vllm/config/compilation.py +673 -0
- vllm/config/device.py +74 -0
- vllm/config/kv_events.py +50 -0
- vllm/config/kv_transfer.py +111 -0
- vllm/config/load.py +113 -0
- vllm/config/lora.py +132 -0
- vllm/config/model.py +1912 -0
- vllm/config/multimodal.py +129 -0
- vllm/config/observability.py +99 -0
- vllm/config/parallel.py +524 -0
- vllm/config/pooler.py +97 -0
- vllm/config/scheduler.py +287 -0
- vllm/config/speculative.py +568 -0
- vllm/config/speech_to_text.py +39 -0
- vllm/config/structured_outputs.py +64 -0
- vllm/config/utils.py +145 -0
- vllm/connections.py +186 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +311 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +440 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
- vllm/distributed/device_communicators/base_device_communicator.py +295 -0
- vllm/distributed/device_communicators/cpu_communicator.py +201 -0
- vllm/distributed/device_communicators/cuda_communicator.py +323 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
- vllm/distributed/device_communicators/pynccl.py +340 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
- vllm/distributed/device_communicators/ray_communicator.py +258 -0
- vllm/distributed/device_communicators/shm_broadcast.py +589 -0
- vllm/distributed/device_communicators/shm_object_storage.py +635 -0
- vllm/distributed/device_communicators/symm_mem.py +136 -0
- vllm/distributed/device_communicators/tpu_communicator.py +102 -0
- vllm/distributed/device_communicators/xpu_communicator.py +94 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +620 -0
- vllm/distributed/eplb/rebalance_algo.py +239 -0
- vllm/distributed/eplb/rebalance_execute.py +424 -0
- vllm/distributed/kv_events.py +362 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +13 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
- vllm/distributed/parallel_state.py +1532 -0
- vllm/distributed/tpu_distributed_utils.py +178 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1778 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/metrics.py +577 -0
- vllm/engine/metrics_types.py +84 -0
- vllm/engine/protocol.py +333 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1705 -0
- vllm/entrypoints/cli/__init__.py +12 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +55 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +36 -0
- vllm/entrypoints/cli/main.py +60 -0
- vllm/entrypoints/cli/openai.py +233 -0
- vllm/entrypoints/cli/run_batch.py +67 -0
- vllm/entrypoints/cli/serve.py +232 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +481 -0
- vllm/entrypoints/harmony_utils.py +436 -0
- vllm/entrypoints/launcher.py +164 -0
- vllm/entrypoints/llm.py +1629 -0
- vllm/entrypoints/logger.py +79 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1953 -0
- vllm/entrypoints/openai/cli_args.py +288 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +2757 -0
- vllm/entrypoints/openai/run_batch.py +491 -0
- vllm/entrypoints/openai/serving_chat.py +1597 -0
- vllm/entrypoints/openai/serving_classification.py +173 -0
- vllm/entrypoints/openai/serving_completion.py +692 -0
- vllm/entrypoints/openai/serving_embedding.py +631 -0
- vllm/entrypoints/openai/serving_engine.py +992 -0
- vllm/entrypoints/openai/serving_models.py +288 -0
- vllm/entrypoints/openai/serving_pooling.py +276 -0
- vllm/entrypoints/openai/serving_responses.py +1709 -0
- vllm/entrypoints/openai/serving_score.py +479 -0
- vllm/entrypoints/openai/serving_tokenization.py +196 -0
- vllm/entrypoints/openai/serving_transcription.py +136 -0
- vllm/entrypoints/openai/speech_to_text.py +388 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
- vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
- vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
- vllm/entrypoints/renderer.py +395 -0
- vllm/entrypoints/score_utils.py +232 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/tool.py +139 -0
- vllm/entrypoints/tool_server.py +206 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +23 -0
- vllm/envs.py +1590 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +381 -0
- vllm/executor/msgspec_utils.py +35 -0
- vllm/executor/ray_distributed_executor.py +699 -0
- vllm/executor/ray_utils.py +410 -0
- vllm/executor/uniproc_executor.py +176 -0
- vllm/forward_context.py +402 -0
- vllm/inputs/__init__.py +30 -0
- vllm/inputs/data.py +356 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +664 -0
- vllm/logger.py +229 -0
- vllm/logging_utils/__init__.py +10 -0
- vllm/logging_utils/dump_input.py +81 -0
- vllm/logging_utils/formatter.py +79 -0
- vllm/logging_utils/log_time.py +32 -0
- vllm/logits_process.py +119 -0
- vllm/logprobs.py +28 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +34 -0
- vllm/lora/layers/base.py +69 -0
- vllm/lora/layers/base_linear.py +185 -0
- vllm/lora/layers/column_parallel_linear.py +609 -0
- vllm/lora/layers/logits_processor.py +247 -0
- vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
- vllm/lora/layers/replicated_linear.py +60 -0
- vllm/lora/layers/row_parallel_linear.py +196 -0
- vllm/lora/layers/utils.py +65 -0
- vllm/lora/layers/vocal_parallel_embedding.py +174 -0
- vllm/lora/lora_weights.py +199 -0
- vllm/lora/models.py +816 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +7 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
- vllm/lora/ops/triton_ops/utils.py +126 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +144 -0
- vllm/lora/peft_helper.py +127 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +458 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +272 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +391 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +136 -0
- vllm/lora/request.py +97 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +246 -0
- vllm/lora/worker_manager.py +267 -0
- vllm/model_executor/__init__.py +12 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +575 -0
- vllm/model_executor/layers/attention_layer_base.py +23 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +225 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
- vllm/model_executor/layers/fla/ops/index.py +39 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
- vllm/model_executor/layers/fla/ops/op.py +39 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
- vllm/model_executor/layers/fla/ops/utils.py +180 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
- vllm/model_executor/layers/fused_moe/__init__.py +89 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
- vllm/model_executor/layers/fused_moe/config.py +804 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/layer.py +2195 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
- vllm/model_executor/layers/fused_moe/utils.py +274 -0
- vllm/model_executor/layers/layernorm.py +395 -0
- vllm/model_executor/layers/lightning_attn.py +661 -0
- vllm/model_executor/layers/linear.py +1603 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +42 -0
- vllm/model_executor/layers/mamba/linear_attn.py +403 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
- vllm/model_executor/layers/mamba/short_conv.py +253 -0
- vllm/model_executor/layers/mla.py +173 -0
- vllm/model_executor/layers/pooler.py +719 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/auto_round.py +388 -0
- vllm/model_executor/layers/quantization/awq.py +228 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +464 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
- vllm/model_executor/layers/quantization/experts_int8.py +223 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +1098 -0
- vllm/model_executor/layers/quantization/gguf.py +599 -0
- vllm/model_executor/layers/quantization/gptq.py +340 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
- vllm/model_executor/layers/quantization/inc.py +61 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +143 -0
- vllm/model_executor/layers/quantization/modelopt.py +1596 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
- vllm/model_executor/layers/quantization/mxfp4.py +988 -0
- vllm/model_executor/layers/quantization/petit.py +306 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +432 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/rtn.py +466 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +214 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
- vllm/model_executor/layers/rotary_embedding/base.py +177 -0
- vllm/model_executor/layers/rotary_embedding/common.py +150 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
- vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
- vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
- vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
- vllm/model_executor/layers/utils.py +195 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +138 -0
- vllm/model_executor/model_loader/base_loader.py +52 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
- vllm/model_executor/model_loader/default_loader.py +277 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +155 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
- vllm/model_executor/model_loader/tensorizer.py +738 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
- vllm/model_executor/model_loader/tpu.py +114 -0
- vllm/model_executor/model_loader/utils.py +292 -0
- vllm/model_executor/model_loader/weight_utils.py +990 -0
- vllm/model_executor/models/__init__.py +33 -0
- vllm/model_executor/models/adapters.py +542 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/apertus.py +579 -0
- vllm/model_executor/models/arcee.py +422 -0
- vllm/model_executor/models/arctic.py +558 -0
- vllm/model_executor/models/aria.py +650 -0
- vllm/model_executor/models/aya_vision.py +468 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bailing_moe.py +642 -0
- vllm/model_executor/models/bamba.py +514 -0
- vllm/model_executor/models/bert.py +665 -0
- vllm/model_executor/models/bert_with_rope.py +687 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +712 -0
- vllm/model_executor/models/bloom.py +374 -0
- vllm/model_executor/models/chameleon.py +1139 -0
- vllm/model_executor/models/chatglm.py +476 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/cohere2_vision.py +481 -0
- vllm/model_executor/models/commandr.py +465 -0
- vllm/model_executor/models/config.py +445 -0
- vllm/model_executor/models/dbrx.py +471 -0
- vllm/model_executor/models/deepseek.py +497 -0
- vllm/model_executor/models/deepseek_eagle.py +240 -0
- vllm/model_executor/models/deepseek_mtp.py +289 -0
- vllm/model_executor/models/deepseek_v2.py +1444 -0
- vllm/model_executor/models/deepseek_vl2.py +658 -0
- vllm/model_executor/models/dots1.py +546 -0
- vllm/model_executor/models/dots_ocr.py +873 -0
- vllm/model_executor/models/ernie45.py +43 -0
- vllm/model_executor/models/ernie45_moe.py +607 -0
- vllm/model_executor/models/ernie45_vl.py +1527 -0
- vllm/model_executor/models/ernie45_vl_moe.py +727 -0
- vllm/model_executor/models/ernie_mtp.py +268 -0
- vllm/model_executor/models/exaone.py +550 -0
- vllm/model_executor/models/exaone4.py +533 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +509 -0
- vllm/model_executor/models/falcon_h1.py +674 -0
- vllm/model_executor/models/fuyu.py +399 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +422 -0
- vllm/model_executor/models/gemma3.py +555 -0
- vllm/model_executor/models/gemma3_mm.py +721 -0
- vllm/model_executor/models/gemma3n.py +1113 -0
- vllm/model_executor/models/gemma3n_mm.py +761 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +304 -0
- vllm/model_executor/models/glm4_1v.py +1690 -0
- vllm/model_executor/models/glm4_moe.py +727 -0
- vllm/model_executor/models/glm4_moe_mtp.py +301 -0
- vllm/model_executor/models/glm4v.py +654 -0
- vllm/model_executor/models/gpt2.py +380 -0
- vllm/model_executor/models/gpt_bigcode.py +344 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +330 -0
- vllm/model_executor/models/gpt_oss.py +712 -0
- vllm/model_executor/models/granite.py +489 -0
- vllm/model_executor/models/granite_speech.py +794 -0
- vllm/model_executor/models/granitemoe.py +550 -0
- vllm/model_executor/models/granitemoehybrid.py +614 -0
- vllm/model_executor/models/granitemoeshared.py +332 -0
- vllm/model_executor/models/gritlm.py +262 -0
- vllm/model_executor/models/grok1.py +547 -0
- vllm/model_executor/models/h2ovl.py +536 -0
- vllm/model_executor/models/hunyuan_v1.py +1042 -0
- vllm/model_executor/models/hyperclovax_vision.py +1192 -0
- vllm/model_executor/models/idefics2_vision_model.py +417 -0
- vllm/model_executor/models/idefics3.py +756 -0
- vllm/model_executor/models/interfaces.py +959 -0
- vllm/model_executor/models/interfaces_base.py +192 -0
- vllm/model_executor/models/intern_vit.py +441 -0
- vllm/model_executor/models/internlm2.py +450 -0
- vllm/model_executor/models/internlm2_ve.py +148 -0
- vllm/model_executor/models/interns1.py +838 -0
- vllm/model_executor/models/interns1_vit.py +418 -0
- vllm/model_executor/models/internvl.py +1423 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +591 -0
- vllm/model_executor/models/jina_vl.py +144 -0
- vllm/model_executor/models/keye.py +1680 -0
- vllm/model_executor/models/keye_vl1_5.py +602 -0
- vllm/model_executor/models/kimi_vl.py +618 -0
- vllm/model_executor/models/lfm2.py +548 -0
- vllm/model_executor/models/llama.py +669 -0
- vllm/model_executor/models/llama4.py +746 -0
- vllm/model_executor/models/llama4_eagle.py +239 -0
- vllm/model_executor/models/llama_eagle.py +179 -0
- vllm/model_executor/models/llama_eagle3.py +296 -0
- vllm/model_executor/models/llava.py +870 -0
- vllm/model_executor/models/llava_next.py +571 -0
- vllm/model_executor/models/llava_next_video.py +476 -0
- vllm/model_executor/models/llava_onevision.py +942 -0
- vllm/model_executor/models/longcat_flash.py +715 -0
- vllm/model_executor/models/longcat_flash_mtp.py +352 -0
- vllm/model_executor/models/mamba.py +275 -0
- vllm/model_executor/models/mamba2.py +291 -0
- vllm/model_executor/models/medusa.py +169 -0
- vllm/model_executor/models/midashenglm.py +792 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +280 -0
- vllm/model_executor/models/minicpm.py +631 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +389 -0
- vllm/model_executor/models/minicpmo.py +770 -0
- vllm/model_executor/models/minicpmv.py +1784 -0
- vllm/model_executor/models/minimax_text_01.py +986 -0
- vllm/model_executor/models/minimax_vl_01.py +426 -0
- vllm/model_executor/models/mistral3.py +628 -0
- vllm/model_executor/models/mixtral.py +606 -0
- vllm/model_executor/models/mllama4.py +1076 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +374 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1567 -0
- vllm/model_executor/models/moonvit.py +673 -0
- vllm/model_executor/models/motif.py +345 -0
- vllm/model_executor/models/mpt.py +329 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
- vllm/model_executor/models/nemotron.py +507 -0
- vllm/model_executor/models/nemotron_h.py +565 -0
- vllm/model_executor/models/nemotron_nas.py +481 -0
- vllm/model_executor/models/nemotron_vl.py +652 -0
- vllm/model_executor/models/nvlm_d.py +203 -0
- vllm/model_executor/models/olmo.py +404 -0
- vllm/model_executor/models/olmo2.py +439 -0
- vllm/model_executor/models/olmoe.py +483 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +348 -0
- vllm/model_executor/models/ovis.py +559 -0
- vllm/model_executor/models/ovis2_5.py +642 -0
- vllm/model_executor/models/paligemma.py +411 -0
- vllm/model_executor/models/persimmon.py +343 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3v.py +698 -0
- vllm/model_executor/models/phi4_multimodal.py +1475 -0
- vllm/model_executor/models/phi4mm.py +1279 -0
- vllm/model_executor/models/phi4mm_audio.py +1254 -0
- vllm/model_executor/models/phi4mm_utils.py +1875 -0
- vllm/model_executor/models/phimoe.py +679 -0
- vllm/model_executor/models/pixtral.py +1345 -0
- vllm/model_executor/models/plamo2.py +978 -0
- vllm/model_executor/models/qwen.py +361 -0
- vllm/model_executor/models/qwen2.py +523 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
- vllm/model_executor/models/qwen2_5_vl.py +1481 -0
- vllm/model_executor/models/qwen2_audio.py +489 -0
- vllm/model_executor/models/qwen2_moe.py +558 -0
- vllm/model_executor/models/qwen2_rm.py +122 -0
- vllm/model_executor/models/qwen2_vl.py +1670 -0
- vllm/model_executor/models/qwen3.py +341 -0
- vllm/model_executor/models/qwen3_moe.py +692 -0
- vllm/model_executor/models/qwen3_next.py +1266 -0
- vllm/model_executor/models/qwen3_next_mtp.py +281 -0
- vllm/model_executor/models/qwen3_vl.py +1613 -0
- vllm/model_executor/models/qwen3_vl_moe.py +358 -0
- vllm/model_executor/models/qwen_vl.py +795 -0
- vllm/model_executor/models/radio.py +576 -0
- vllm/model_executor/models/registry.py +990 -0
- vllm/model_executor/models/roberta.py +252 -0
- vllm/model_executor/models/rvl.py +103 -0
- vllm/model_executor/models/seed_oss.py +485 -0
- vllm/model_executor/models/siglip.py +540 -0
- vllm/model_executor/models/siglip2navit.py +689 -0
- vllm/model_executor/models/skyworkr1v.py +911 -0
- vllm/model_executor/models/smolvlm.py +44 -0
- vllm/model_executor/models/solar.py +504 -0
- vllm/model_executor/models/stablelm.py +341 -0
- vllm/model_executor/models/starcoder2.py +354 -0
- vllm/model_executor/models/step3_text.py +510 -0
- vllm/model_executor/models/step3_vl.py +1072 -0
- vllm/model_executor/models/swin.py +475 -0
- vllm/model_executor/models/tarsier.py +639 -0
- vllm/model_executor/models/telechat2.py +151 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/terratorch.py +294 -0
- vllm/model_executor/models/transformers.py +948 -0
- vllm/model_executor/models/ultravox.py +654 -0
- vllm/model_executor/models/utils.py +808 -0
- vllm/model_executor/models/vision.py +404 -0
- vllm/model_executor/models/voxtral.py +786 -0
- vllm/model_executor/models/whisper.py +963 -0
- vllm/model_executor/models/zamba2.py +960 -0
- vllm/model_executor/parameter.py +620 -0
- vllm/model_executor/utils.py +86 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
- vllm/model_executor/warmup/kernel_warmup.py +83 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +116 -0
- vllm/multimodal/base.py +27 -0
- vllm/multimodal/cache.py +697 -0
- vllm/multimodal/evs.py +273 -0
- vllm/multimodal/hasher.py +102 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +987 -0
- vllm/multimodal/parse.py +511 -0
- vllm/multimodal/processing.py +2148 -0
- vllm/multimodal/profiling.py +284 -0
- vllm/multimodal/registry.py +345 -0
- vllm/multimodal/utils.py +503 -0
- vllm/multimodal/video.py +319 -0
- vllm/outputs.py +324 -0
- vllm/platforms/__init__.py +263 -0
- vllm/platforms/cpu.py +340 -0
- vllm/platforms/cuda.py +668 -0
- vllm/platforms/interface.py +620 -0
- vllm/platforms/rocm.py +497 -0
- vllm/platforms/tpu.py +233 -0
- vllm/platforms/xpu.py +243 -0
- vllm/plugins/__init__.py +72 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +67 -0
- vllm/plugins/lora_resolvers/README.md +16 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +191 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +22 -0
- vllm/ray/ray_env.py +72 -0
- vllm/reasoning/__init__.py +29 -0
- vllm/reasoning/abs_reasoning_parsers.py +202 -0
- vllm/reasoning/basic_parsers.py +156 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
- vllm/reasoning/gptoss_reasoning_parser.py +87 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
- vllm/reasoning/mistral_reasoning_parser.py +56 -0
- vllm/reasoning/qwen3_reasoning_parser.py +72 -0
- vllm/reasoning/seedoss_reasoning_parser.py +28 -0
- vllm/reasoning/step3_reasoning_parser.py +109 -0
- vllm/sampling_params.py +593 -0
- vllm/scalar_type.py +349 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +103 -0
- vllm/tasks.py +11 -0
- vllm/test_utils.py +129 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +136 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +70 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1102 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +63 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/deepseek_v3.py +101 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/dotsocr.py +69 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/jais.py +237 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/midashenglm.py +101 -0
- vllm/transformers_utils/configs/mistral.py +165 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +259 -0
- vllm/transformers_utils/configs/nemotron_vl.py +56 -0
- vllm/transformers_utils/configs/olmo3.py +80 -0
- vllm/transformers_utils/configs/ovis.py +176 -0
- vllm/transformers_utils/configs/qwen3_next.py +275 -0
- vllm/transformers_utils/configs/radio.py +91 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +32 -0
- vllm/transformers_utils/configs/speculators/base.py +111 -0
- vllm/transformers_utils/configs/step3_vl.py +123 -0
- vllm/transformers_utils/configs/ultravox.py +116 -0
- vllm/transformers_utils/detokenizer_utils.py +199 -0
- vllm/transformers_utils/dynamic_module.py +60 -0
- vllm/transformers_utils/processor.py +299 -0
- vllm/transformers_utils/processors/__init__.py +16 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/processors/ovis2_5.py +458 -0
- vllm/transformers_utils/runai_utils.py +104 -0
- vllm/transformers_utils/s3_utils.py +93 -0
- vllm/transformers_utils/tokenizer.py +292 -0
- vllm/transformers_utils/tokenizer_base.py +154 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +521 -0
- vllm/transformers_utils/utils.py +108 -0
- vllm/triton_utils/__init__.py +16 -0
- vllm/triton_utils/importing.py +96 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +259 -0
- vllm/utils/__init__.py +3566 -0
- vllm/utils/deep_gemm.py +319 -0
- vllm/utils/flashinfer.py +443 -0
- vllm/utils/jsontree.py +178 -0
- vllm/utils/tensor_schema.py +235 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +919 -0
- vllm/v1/attention/backends/flash_attn.py +795 -0
- vllm/v1/attention/backends/flashinfer.py +1181 -0
- vllm/v1/attention/backends/flex_attention.py +861 -0
- vllm/v1/attention/backends/gdn_attn.py +332 -0
- vllm/v1/attention/backends/linear_attn.py +67 -0
- vllm/v1/attention/backends/mamba1_attn.py +81 -0
- vllm/v1/attention/backends/mamba2_attn.py +232 -0
- vllm/v1/attention/backends/mamba_attn.py +52 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +1783 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
- vllm/v1/attention/backends/mla/flashmla.py +203 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
- vllm/v1/attention/backends/mla/indexer.py +342 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
- vllm/v1/attention/backends/mla/triton_mla.py +177 -0
- vllm/v1/attention/backends/pallas.py +409 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
- vllm/v1/attention/backends/rocm_attn.py +426 -0
- vllm/v1/attention/backends/short_conv_attn.py +94 -0
- vllm/v1/attention/backends/tree_attn.py +451 -0
- vllm/v1/attention/backends/triton_attn.py +361 -0
- vllm/v1/attention/backends/utils.py +990 -0
- vllm/v1/attention/backends/xformers.py +438 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +416 -0
- vllm/v1/core/encoder_cache_manager.py +333 -0
- vllm/v1/core/kv_cache_coordinator.py +440 -0
- vllm/v1/core/kv_cache_manager.py +399 -0
- vllm/v1/core/kv_cache_utils.py +1291 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +47 -0
- vllm/v1/core/sched/interface.py +158 -0
- vllm/v1/core/sched/output.py +166 -0
- vllm/v1/core/sched/request_queue.py +224 -0
- vllm/v1/core/sched/scheduler.py +1296 -0
- vllm/v1/core/sched/utils.py +69 -0
- vllm/v1/core/single_type_kv_cache_manager.py +671 -0
- vllm/v1/cudagraph_dispatcher.py +125 -0
- vllm/v1/engine/__init__.py +203 -0
- vllm/v1/engine/async_llm.py +742 -0
- vllm/v1/engine/coordinator.py +357 -0
- vllm/v1/engine/core.py +1235 -0
- vllm/v1/engine/core_client.py +1334 -0
- vllm/v1/engine/detokenizer.py +349 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +370 -0
- vllm/v1/engine/logprobs.py +201 -0
- vllm/v1/engine/output_processor.py +576 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +545 -0
- vllm/v1/engine/utils.py +860 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +137 -0
- vllm/v1/executor/multiproc_executor.py +726 -0
- vllm/v1/executor/ray_distributed_executor.py +108 -0
- vllm/v1/executor/utils.py +23 -0
- vllm/v1/kv_cache_interface.py +375 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +165 -0
- vllm/v1/kv_offload/backend.py +96 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +61 -0
- vllm/v1/kv_offload/cpu.py +75 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +132 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +61 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
- vllm/v1/kv_offload/worker/worker.py +142 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +741 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +152 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +257 -0
- vllm/v1/outputs.py +161 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +77 -0
- vllm/v1/request.py +241 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +294 -0
- vllm/v1/sample/logits_processor/builtin.py +275 -0
- vllm/v1/sample/logits_processor/interface.py +97 -0
- vllm/v1/sample/logits_processor/state.py +161 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/logprobs.py +26 -0
- vllm/v1/sample/ops/penalties.py +43 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
- vllm/v1/sample/rejection_sampler.py +623 -0
- vllm/v1/sample/sampler.py +285 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +213 -0
- vllm/v1/serial_utils.py +423 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1011 -0
- vllm/v1/spec_decode/medusa.py +66 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +211 -0
- vllm/v1/spec_decode/ngram_proposer.py +276 -0
- vllm/v1/spec_decode/utils.py +14 -0
- vllm/v1/structured_output/__init__.py +295 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
- vllm/v1/structured_output/backend_outlines.py +320 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +327 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +454 -0
- vllm/v1/utils.py +396 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +210 -0
- vllm/v1/worker/cpu_model_runner.py +175 -0
- vllm/v1/worker/cpu_worker.py +156 -0
- vllm/v1/worker/gpu_input_batch.py +863 -0
- vllm/v1/worker/gpu_model_runner.py +4160 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
- vllm/v1/worker/gpu_worker.py +710 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
- vllm/v1/worker/lora_model_runner_mixin.py +183 -0
- vllm/v1/worker/tpu_input_batch.py +587 -0
- vllm/v1/worker/tpu_model_runner.py +1946 -0
- vllm/v1/worker/tpu_worker.py +346 -0
- vllm/v1/worker/ubatch_splitting.py +192 -0
- vllm/v1/worker/ubatch_utils.py +27 -0
- vllm/v1/worker/ubatching.py +224 -0
- vllm/v1/worker/utils.py +344 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/v1/worker/xpu_model_runner.py +57 -0
- vllm/v1/worker/xpu_worker.py +179 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/worker_base.py +279 -0
- vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
- vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
- vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
- vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
vllm/v1/engine/core.py
ADDED
|
@@ -0,0 +1,1235 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
import gc
|
|
4
|
+
import os
|
|
5
|
+
import queue
|
|
6
|
+
import signal
|
|
7
|
+
import threading
|
|
8
|
+
import time
|
|
9
|
+
from collections import deque
|
|
10
|
+
from collections.abc import Generator
|
|
11
|
+
from concurrent.futures import Future
|
|
12
|
+
from contextlib import ExitStack, contextmanager
|
|
13
|
+
from inspect import isclass, signature
|
|
14
|
+
from logging import DEBUG
|
|
15
|
+
from typing import Any, Callable, Optional, TypeVar, Union
|
|
16
|
+
|
|
17
|
+
import msgspec
|
|
18
|
+
import zmq
|
|
19
|
+
|
|
20
|
+
from vllm.config import ParallelConfig, VllmConfig
|
|
21
|
+
from vllm.distributed import stateless_destroy_torch_distributed_process_group
|
|
22
|
+
from vllm.logger import init_logger
|
|
23
|
+
from vllm.logging_utils.dump_input import dump_engine_exception
|
|
24
|
+
from vllm.lora.request import LoRARequest
|
|
25
|
+
from vllm.multimodal import MULTIMODAL_REGISTRY
|
|
26
|
+
from vllm.multimodal.cache import engine_receiver_cache_from_config
|
|
27
|
+
from vllm.tasks import POOLING_TASKS, SupportedTask
|
|
28
|
+
from vllm.transformers_utils.config import (
|
|
29
|
+
maybe_register_config_serialize_by_value)
|
|
30
|
+
from vllm.utils import (decorate_logs, get_hash_fn_by_name, make_zmq_socket,
|
|
31
|
+
resolve_obj_by_qualname, set_process_title)
|
|
32
|
+
from vllm.v1.core.kv_cache_utils import (BlockHash,
|
|
33
|
+
generate_scheduler_kv_cache_config,
|
|
34
|
+
get_kv_cache_configs,
|
|
35
|
+
get_request_block_hasher,
|
|
36
|
+
init_none_hash)
|
|
37
|
+
from vllm.v1.core.sched.interface import SchedulerInterface
|
|
38
|
+
from vllm.v1.core.sched.output import SchedulerOutput
|
|
39
|
+
from vllm.v1.core.sched.scheduler import Scheduler as V1Scheduler
|
|
40
|
+
from vllm.v1.engine import (EngineCoreOutputs, EngineCoreRequest,
|
|
41
|
+
EngineCoreRequestType,
|
|
42
|
+
ReconfigureDistributedRequest, ReconfigureRankType,
|
|
43
|
+
UtilityOutput, UtilityResult)
|
|
44
|
+
from vllm.v1.engine.utils import (EngineHandshakeMetadata, EngineZmqAddresses,
|
|
45
|
+
get_device_indices)
|
|
46
|
+
from vllm.v1.executor.abstract import Executor
|
|
47
|
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
|
48
|
+
from vllm.v1.metrics.stats import SchedulerStats
|
|
49
|
+
from vllm.v1.outputs import ModelRunnerOutput
|
|
50
|
+
from vllm.v1.request import Request, RequestStatus
|
|
51
|
+
from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder
|
|
52
|
+
from vllm.v1.structured_output import StructuredOutputManager
|
|
53
|
+
from vllm.version import __version__ as VLLM_VERSION
|
|
54
|
+
|
|
55
|
+
logger = init_logger(__name__)
|
|
56
|
+
|
|
57
|
+
POLLING_TIMEOUT_S = 2.5
|
|
58
|
+
HANDSHAKE_TIMEOUT_MINS = 5
|
|
59
|
+
|
|
60
|
+
_R = TypeVar('_R') # Return type for collective_rpc
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class EngineCore:
    """Inner loop of vLLM's Engine."""

    def __init__(self,
                 vllm_config: VllmConfig,
                 executor_class: type[Executor],
                 log_stats: bool,
                 executor_fail_callback: Optional[Callable] = None):

        # Plugins need to be loaded at the engine/scheduler level too.
        from vllm.plugins import load_general_plugins
        load_general_plugins()

        self.vllm_config = vllm_config
        logger.info("Initializing a V1 LLM engine (v%s) with config: %s",
                    VLLM_VERSION, vllm_config)

        self.log_stats = log_stats

        # Setup Model.
        self.model_executor = executor_class(vllm_config)
        if executor_fail_callback is not None:
            self.model_executor.register_failure_callback(
                executor_fail_callback)

        self.available_gpu_memory_for_kv_cache = -1

        # Setup KV Caches and update CacheConfig after profiling.
        num_gpu_blocks, num_cpu_blocks, kv_cache_config = \
            self._initialize_kv_caches(vllm_config)

        vllm_config.cache_config.num_gpu_blocks = num_gpu_blocks
        vllm_config.cache_config.num_cpu_blocks = num_cpu_blocks
        self.collective_rpc("initialize_cache",
                            args=(num_gpu_blocks, num_cpu_blocks))

        self.structured_output_manager = StructuredOutputManager(vllm_config)

        # Setup scheduler.
        if isinstance(vllm_config.scheduler_config.scheduler_cls, str):
            Scheduler = resolve_obj_by_qualname(
                vllm_config.scheduler_config.scheduler_cls)
        else:
            Scheduler = vllm_config.scheduler_config.scheduler_cls

        # This warning can be removed once the V1 Scheduler interface is
        # finalized and we can maintain support for scheduler classes that
        # implement it.
        if Scheduler is not V1Scheduler:
            logger.warning(
                "Using configured V1 scheduler class %s. "
                "This scheduler interface is not public and "
                "compatibility may not be maintained.",
                vllm_config.scheduler_config.scheduler_cls)

        if len(kv_cache_config.kv_cache_groups) == 0:
            # Encoder models without KV cache don't support
            # chunked prefill. But do SSM models?
            logger.info("Disabling chunked prefill for model without KVCache")
            vllm_config.scheduler_config.chunked_prefill_enabled = False

        self.scheduler: SchedulerInterface = Scheduler(
            vllm_config=vllm_config,
            kv_cache_config=kv_cache_config,
            structured_output_manager=self.structured_output_manager,
            include_finished_set=vllm_config.parallel_config.data_parallel_size
            > 1,
            log_stats=self.log_stats,
        )
        self.use_spec_decode = vllm_config.speculative_config is not None
        if self.scheduler.connector is not None:  # type: ignore
            self.model_executor.init_kv_output_aggregator(
                self.scheduler.connector.get_finished_count())  # type: ignore

        self.mm_registry = mm_registry = MULTIMODAL_REGISTRY
        self.mm_receiver_cache = engine_receiver_cache_from_config(
            vllm_config, mm_registry)

        # Setup batch queue for pipeline parallelism.
        # Batch queue for scheduled batches. This enables us to asynchronously
        # schedule and execute batches, and is required by pipeline parallelism
        # to eliminate pipeline bubbles.
        self.batch_queue_size = self.model_executor.max_concurrent_batches
        self.batch_queue: Optional[deque[tuple[Future[ModelRunnerOutput],
                                               SchedulerOutput]]] = None
        if self.batch_queue_size > 1:
            logger.info("Batch queue is enabled with size %d",
                        self.batch_queue_size)
            self.batch_queue = deque(maxlen=self.batch_queue_size)

        self.request_block_hasher: Optional[Callable[[Request],
                                                     list[BlockHash]]] = None
        if (self.vllm_config.cache_config.enable_prefix_caching
                or self.scheduler.get_kv_connector() is not None):

            block_size = vllm_config.cache_config.block_size
            caching_hash_fn = get_hash_fn_by_name(
                vllm_config.cache_config.prefix_caching_hash_algo)
            init_none_hash(caching_hash_fn)

            self.request_block_hasher = get_request_block_hasher(
                block_size, caching_hash_fn)

        self.step_fn = (self.step if self.batch_queue is None else
                        self.step_with_batch_queue)

    def _initialize_kv_caches(
            self, vllm_config: VllmConfig) -> tuple[int, int, KVCacheConfig]:
        start = time.time()

        # Get all kv cache needed by the model.
        kv_cache_specs = self.model_executor.get_kv_cache_specs()

        has_kv_cache = any(kv_cache_spec for kv_cache_spec in kv_cache_specs)
        if has_kv_cache:
            if os.environ.get("VLLM_ELASTIC_EP_SCALE_UP_LAUNCH") == "1":
                dp_group = getattr(self, "dp_group", None)
                assert dp_group is not None
                self.available_gpu_memory_for_kv_cache = \
                    ParallelConfig.sync_kv_cache_memory_size(dp_group, -1)
                available_gpu_memory = [
                    self.available_gpu_memory_for_kv_cache
                ] * len(kv_cache_specs)
            else:
                # Profiles the peak memory usage of the model to determine how
                # much memory can be allocated for kv cache.
                available_gpu_memory = (
                    self.model_executor.determine_available_memory())
                self.available_gpu_memory_for_kv_cache = \
                    available_gpu_memory[0]
        else:
            # Attention-free models don't need memory for kv cache.
            available_gpu_memory = [0] * len(kv_cache_specs)

        assert len(kv_cache_specs) == len(available_gpu_memory)

        kv_cache_configs = get_kv_cache_configs(vllm_config, kv_cache_specs,
                                                available_gpu_memory)
        scheduler_kv_cache_config = generate_scheduler_kv_cache_config(
            kv_cache_configs)
        num_gpu_blocks = scheduler_kv_cache_config.num_blocks
        num_cpu_blocks = 0

        # Initialize kv cache and warm up the execution.
        self.model_executor.initialize_from_config(kv_cache_configs)

        elapsed = time.time() - start
        logger.info(("init engine (profile, create kv cache, "
                     "warmup model) took %.2f seconds"), elapsed)
        return num_gpu_blocks, num_cpu_blocks, scheduler_kv_cache_config

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        return self.model_executor.supported_tasks

    def add_request(self, request: Request, request_wave: int = 0):
        """Add a request to the scheduler.

        `request_wave`: indicates which wave of requests this is expected to
        belong to in the DP case.
        """
        # Validate the request_id type.
        if not isinstance(request.request_id, str):
            raise TypeError(
                f"request_id must be a string, got {type(request.request_id)}")

        if pooling_params := request.pooling_params:
            supported_pooling_tasks = [
                task for task in self.get_supported_tasks()
                if task in POOLING_TASKS
            ]

            if pooling_params.task not in supported_pooling_tasks:
                raise ValueError(f"Unsupported task: {pooling_params.task!r} "
                                 f"Supported tasks: {supported_pooling_tasks}")

        if request.kv_transfer_params is not None and (
                not self.scheduler.get_kv_connector()):
            logger.warning("Got kv_transfer_params, but no KVConnector found. "
                           "Disabling KVTransfer for this request.")

        self.scheduler.add_request(request)

    def abort_requests(self, request_ids: list[str]):
        """Abort requests from the scheduler."""

        # TODO: The scheduler doesn't really need to know the
        # specific finish reason, TBD whether we propagate that
        # (i.e. client-aborted vs stop criteria met).
        self.scheduler.finish_requests(request_ids,
                                       RequestStatus.FINISHED_ABORTED)

    def execute_model_with_error_logging(
        self,
        model_fn: Callable[[SchedulerOutput], ModelRunnerOutput],
        scheduler_output: SchedulerOutput,
    ) -> ModelRunnerOutput:
        """Execute the model and log detailed info on failure."""
        try:
            return model_fn(scheduler_output)
        except Exception as err:
            # We do not want to catch BaseException here since we're only
            # interested in dumping info when the exception is due to an
            # error from execute_model itself.

            # NOTE: This method is exception-free.
            dump_engine_exception(self.vllm_config, scheduler_output,
                                  self.scheduler.make_stats())
            raise err

    def step(self) -> tuple[dict[int, EngineCoreOutputs], bool]:
        """Schedule, execute, and make output.

        Returns a tuple of outputs and a flag indicating whether the model
        was executed.
        """

        # Check for any requests remaining in the scheduler - unfinished,
        # or finished and not yet removed from the batch.
        if not self.scheduler.has_requests():
            return {}, False
        scheduler_output = self.scheduler.schedule()
        model_output = self.execute_model_with_error_logging(
            self.model_executor.execute_model,  # type: ignore
            scheduler_output)
        engine_core_outputs = self.scheduler.update_from_output(
            scheduler_output, model_output)  # type: ignore

        return (engine_core_outputs,
                scheduler_output.total_num_scheduled_tokens > 0)

    def post_step(self, model_executed: bool) -> None:
        if self.use_spec_decode and model_executed:
            # Take the draft token ids.
            draft_token_ids = self.model_executor.take_draft_token_ids()
            if draft_token_ids is not None:
                self.scheduler.update_draft_token_ids(draft_token_ids)

    def step_with_batch_queue(
            self) -> tuple[Optional[dict[int, EngineCoreOutputs]], bool]:
        """Schedule and execute batches with the batch queue.

        Note that if there is nothing to output in this step, None is
        returned.

        The execution flow is as follows:
        1. Try to schedule a new batch if the batch queue is not full.
           If a new batch is scheduled, directly return an empty engine core
           output. In other words, filling the batch queue has a higher
           priority than getting model outputs.
        2. If there is no new scheduled batch, meaning that the batch queue
           is full or no other requests can be scheduled, block until the
           first batch in the job queue is finished.
        3. Update the scheduler from the output.
        """
        batch_queue = self.batch_queue
        assert batch_queue is not None

        # Try to schedule a new batch if the batch queue is not full, but
        # the scheduler may return an empty batch if all requests are
        # scheduled. Note that this is not blocking.
        assert len(batch_queue) < self.batch_queue_size

        model_executed = False
        if self.scheduler.has_requests():
            scheduler_output = self.scheduler.schedule()
            future = self.model_executor.execute_model(scheduler_output,
                                                       non_block=True)
            batch_queue.appendleft(
                (future, scheduler_output))  # type: ignore[arg-type]

            model_executed = scheduler_output.total_num_scheduled_tokens > 0
            if model_executed and len(batch_queue) < self.batch_queue_size \
                    and not batch_queue[-1][0].done():
                # Don't block on the next worker response unless the queue is
                # full or there are no more requests to schedule.
                return None, True

        elif not batch_queue:
            # Queue is empty. We should not reach here since this method
            # should only be called when the scheduler contains requests or
            # the queue is non-empty.
            return None, False

        # Block until the next result is available.
        future, scheduler_output = batch_queue.pop()
        model_output = self.execute_model_with_error_logging(
            lambda _: future.result(), scheduler_output)

        engine_core_outputs = self.scheduler.update_from_output(
            scheduler_output, model_output)

        return engine_core_outputs, model_executed

    def shutdown(self):
        self.structured_output_manager.clear_backend()
        if self.model_executor:
            self.model_executor.shutdown()
        if self.scheduler:
            self.scheduler.shutdown()

    def profile(self, is_start: bool = True):
        self.model_executor.profile(is_start)

    def reset_mm_cache(self):
        # NOTE: Since this is mainly for debugging, we don't attempt to
        # re-sync the internal caches (P0 processor, P0 mirror, P1 mirror).
        if self.scheduler.has_unfinished_requests():
            logger.warning("Resetting the multi-modal cache when requests are "
                           "in progress may lead to desynced internal caches.")

        if self.mm_receiver_cache is not None:
            self.mm_receiver_cache.clear_cache()

    def reset_prefix_cache(self):
        self.scheduler.reset_prefix_cache()

    def sleep(self, level: int = 1):
        self.model_executor.sleep(level)

    def wake_up(self, tags: Optional[list[str]] = None):
        self.model_executor.wake_up(tags)

    def is_sleeping(self) -> bool:
        return self.model_executor.is_sleeping

    def execute_dummy_batch(self):
        self.model_executor.execute_dummy_batch()

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_executor.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.model_executor.remove_lora(lora_id)

    def list_loras(self) -> set[int]:
        return self.model_executor.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        return self.model_executor.pin_lora(lora_id)

    def save_sharded_state(
        self,
        path: str,
        pattern: Optional[str] = None,
        max_size: Optional[int] = None,
    ) -> None:
        self.model_executor.save_sharded_state(path=path,
                                               pattern=pattern,
                                               max_size=max_size)

    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        return self.model_executor.collective_rpc(method, timeout, args,
                                                  kwargs)

    def save_tensorized_model(
        self,
        tensorizer_config,
    ) -> None:
        self.model_executor.save_tensorized_model(
            tensorizer_config=tensorizer_config, )

    def preprocess_add_request(
            self, request: EngineCoreRequest) -> tuple[Request, int]:
        """Preprocess the request.

        This function can be used directly in the input processing thread so
        that request initialization runs in parallel with the model forward
        pass.
        """
        # Note on thread safety: no race condition.
        # `mm_receiver_cache` is reset at the end of LLMEngine init,
        # and will only be accessed in the input processing thread afterwards.
        if self.mm_receiver_cache is not None and request.mm_features:
            request.mm_features = (
                self.mm_receiver_cache.get_and_update_features(
                    request.mm_features))

        req = Request.from_engine_core_request(request,
                                               self.request_block_hasher)
        if req.use_structured_output:
            # Note on thread safety: no race condition.
            # `grammar_init` is only invoked in the input processing thread.
            # For `structured_output_manager`, each request is independent and
            # grammar compilation is async. The scheduler always checks the
            # grammar compilation status before scheduling a request.
            self.structured_output_manager.grammar_init(req)
        return req, request.current_wave

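# Illustrative sketch (not part of the original core.py): a minimal,
# synchronous way a caller might drive the EngineCore defined above, using
# only the methods shown in this file (add_request/step/post_step). Real
# clients go through the EngineCoreProc ZMQ wrapper below instead.
def _drive_engine_core_example(core: EngineCore,
                               requests: list[Request]) -> None:
    for request in requests:
        core.add_request(request)
    while core.scheduler.has_requests():
        # step() returns (outputs, model_executed); outputs maps a client
        # index to the EngineCoreOutputs produced by this scheduling step.
        outputs, model_executed = core.step()
        core.post_step(model_executed)
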
class EngineCoreProc(EngineCore):
    """ZMQ-wrapper for running EngineCore in a background process."""

    ENGINE_CORE_DEAD = b'ENGINE_CORE_DEAD'

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        handshake_address: str,
        executor_class: type[Executor],
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
        engine_index: int = 0,
    ):
        self.input_queue = queue.Queue[tuple[EngineCoreRequestType, Any]]()
        self.output_queue = queue.Queue[Union[tuple[int, EngineCoreOutputs],
                                              bytes]]()
        executor_fail_callback = lambda: self.input_queue.put_nowait(
            (EngineCoreRequestType.EXECUTOR_FAILED, b''))

        self.engine_index = engine_index
        identity = self.engine_index.to_bytes(length=2, byteorder="little")
        self.engines_running = False

        with self._perform_handshakes(handshake_address, identity,
                                      local_client, vllm_config,
                                      client_handshake_address) as addresses:
            self.client_count = len(addresses.outputs)

            # Set up data parallel environment.
            self.has_coordinator = addresses.coordinator_output is not None
            self.frontend_stats_publish_address = (
                addresses.frontend_stats_publish_address)
            logger.debug("Has DP Coordinator: %s, stats publish address: %s",
                         self.has_coordinator,
                         self.frontend_stats_publish_address)
            # Only publish request queue stats to the coordinator for
            # "internal" and "hybrid" LB modes.
            self.publish_dp_lb_stats = (
                self.has_coordinator
                and not vllm_config.parallel_config.data_parallel_external_lb)

            self._init_data_parallel(vllm_config)

            super().__init__(vllm_config, executor_class, log_stats,
                             executor_fail_callback)

        # Background Threads and Queues for IO. These enable us to
        # overlap ZMQ socket IO with GPU since they release the GIL,
        # and to overlap some serialization/deserialization with the
        # model forward pass.
        # Threads handle Socket <-> Queues and core_busy_loop uses Queue.
        ready_event = threading.Event()
        input_thread = threading.Thread(target=self.process_input_sockets,
                                        args=(addresses.inputs,
                                              addresses.coordinator_input,
                                              identity, ready_event),
                                        daemon=True)
        input_thread.start()

        self.output_thread = threading.Thread(
            target=self.process_output_sockets,
            args=(addresses.outputs, addresses.coordinator_output,
                  self.engine_index),
            daemon=True)
        self.output_thread.start()

        # Don't complete the handshake until the DP coordinator ready message
        # is received.
        while not ready_event.wait(timeout=10):
            if not input_thread.is_alive():
                raise RuntimeError(
                    "Input socket thread died during startup")
            assert addresses.coordinator_input is not None
            logger.info("Waiting for READY message from DP Coordinator...")

        # Mark the startup heap as static so that it's ignored by GC.
        # Reduces pause times of oldest generation collections.
        gc.collect()
        gc.freeze()

    @contextmanager
    def _perform_handshakes(
        self,
        handshake_address: str,
        identity: bytes,
        local_client: bool,
        vllm_config: VllmConfig,
        client_handshake_address: Optional[str],
    ) -> Generator[EngineZmqAddresses, None, None]:
        """
        Perform startup handshakes.

        For DP=1 or offline mode, this is with the colocated front-end
        process.

        For DP>1 with internal load-balancing, this is with the shared
        front-end process, which may reside on a different node.

        For DP>1 with external or hybrid load-balancing, two handshakes are
        performed:
        - With the rank 0 front-end process, which retrieves the
          DP Coordinator ZMQ addresses and DP process group address.
        - With the colocated front-end process, which retrieves the
          client input/output socket addresses.
        The exception is the rank 0 and colocated engines themselves, which
        don't require the second handshake.

        Here, "front-end" process can mean the process containing the engine
        core client (which is the API server process in the case the API
        server is not scaled out), OR the launcher process running the
        run_multi_api_server() function in serve.py.
        """
        input_ctx = zmq.Context()
        is_local = local_client and client_handshake_address is None
        headless = not local_client
        handshake = self._perform_handshake(input_ctx, handshake_address,
                                            identity, is_local, headless,
                                            vllm_config,
                                            vllm_config.parallel_config)
        if client_handshake_address is None:
            with handshake as addresses:
                yield addresses
        else:
            assert local_client
            local_handshake = self._perform_handshake(
                input_ctx, client_handshake_address, identity, True, False,
                vllm_config)
            with handshake as addresses, local_handshake as client_addresses:
                addresses.inputs = client_addresses.inputs
                addresses.outputs = client_addresses.outputs
                yield addresses

        # Update the config, which may have changed from the handshake.
        vllm_config.__post_init__()

    @contextmanager
    def _perform_handshake(
        self,
        ctx: zmq.Context,
        handshake_address: str,
        identity: bytes,
        local_client: bool,
        headless: bool,
        vllm_config: VllmConfig,
        parallel_config_to_update: Optional[ParallelConfig] = None,
    ) -> Generator[EngineZmqAddresses, None, None]:
        with make_zmq_socket(ctx,
                             handshake_address,
                             zmq.DEALER,
                             identity=identity,
                             linger=5000,
                             bind=False) as handshake_socket:
            # Register engine with front-end.
            addresses = self.startup_handshake(handshake_socket, local_client,
                                               headless,
                                               parallel_config_to_update)
            yield addresses

            # Send ready message.
            num_gpu_blocks = vllm_config.cache_config.num_gpu_blocks
            # We pass back the coordinator stats update address here for the
            # external LB case for our colocated front-end to use (coordinator
            # only runs with rank 0).
            dp_stats_address = self.frontend_stats_publish_address
            handshake_socket.send(
                msgspec.msgpack.encode({
                    "status": "READY",
                    "local": local_client,
                    "headless": headless,
                    "num_gpu_blocks": num_gpu_blocks,
                    "dp_stats_address": dp_stats_address,
                }))

    @staticmethod
    def startup_handshake(
        handshake_socket: zmq.Socket,
        local_client: bool,
        headless: bool,
        parallel_config: Optional[ParallelConfig] = None,
    ) -> EngineZmqAddresses:

        # Send registration message.
        handshake_socket.send(
            msgspec.msgpack.encode({
                "status": "HELLO",
                "local": local_client,
                "headless": headless,
            }))

        # Receive initialization message.
        logger.info("Waiting for init message from front-end.")
        if not handshake_socket.poll(timeout=HANDSHAKE_TIMEOUT_MINS * 60_000):
            raise RuntimeError("Did not receive response from front-end "
                               f"process within {HANDSHAKE_TIMEOUT_MINS} "
                               f"minutes")
        init_bytes = handshake_socket.recv()
        init_message: EngineHandshakeMetadata = msgspec.msgpack.decode(
            init_bytes, type=EngineHandshakeMetadata)
        logger.debug("Received init message: %s", init_message)

        if parallel_config is not None:
            for key, value in init_message.parallel_config.items():
                setattr(parallel_config, key, value)

        return init_message.addresses

    @staticmethod
    def run_engine_core(*args,
                        dp_rank: int = 0,
                        local_dp_rank: int = 0,
                        **kwargs):
        """Launch the EngineCore busy loop in a background process."""

        # Signal handler used for graceful termination.
        # The SystemExit exception is only raised once to allow this and
        # worker processes to terminate without error.
        shutdown_requested = False

        # Ensure we can serialize the transformer config after spawning.
        maybe_register_config_serialize_by_value()

        def signal_handler(signum, frame):
            nonlocal shutdown_requested
            if not shutdown_requested:
                shutdown_requested = True
                raise SystemExit()

        # Either SIGTERM or SIGINT will terminate the engine_core.
        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)

        engine_core: Optional[EngineCoreProc] = None
        try:
            parallel_config: ParallelConfig = kwargs[
                "vllm_config"].parallel_config
            if parallel_config.data_parallel_size > 1 or dp_rank > 0:
                set_process_title("EngineCore", f"DP{dp_rank}")
                decorate_logs()
                # Set data parallel rank for this engine process.
                parallel_config.data_parallel_rank = dp_rank
                parallel_config.data_parallel_rank_local = local_dp_rank
                engine_core = DPEngineCoreProc(*args, **kwargs)
            else:
                set_process_title("EngineCore")
                decorate_logs()
                engine_core = EngineCoreProc(*args, **kwargs)

            engine_core.run_busy_loop()

        except SystemExit:
            logger.debug("EngineCore exiting.")
            raise
        except Exception as e:
            if engine_core is None:
                logger.exception("EngineCore failed to start.")
            else:
                logger.exception("EngineCore encountered a fatal error.")
                engine_core._send_engine_dead()
            raise e
        finally:
            if engine_core is not None:
                engine_core.shutdown()

    def _init_data_parallel(self, vllm_config: VllmConfig):
        pass

    def run_busy_loop(self):
        """Core busy loop of the EngineCore."""

        # Loop until the process is sent a SIGINT or SIGTERM.
        while True:
            # 1) Poll the input queue until there is work to do.
            self._process_input_queue()
            # 2) Step the engine core and return the outputs.
            self._process_engine_step()

    def _process_input_queue(self):
        """Exits when an engine step needs to be performed."""

        waited = False
        while not self.engines_running and not self.scheduler.has_requests() \
                and not self.batch_queue:
            if logger.isEnabledFor(DEBUG) and self.input_queue.empty():
                logger.debug("EngineCore waiting for work.")
                waited = True
            req = self.input_queue.get()
            self._handle_client_request(*req)

        if waited:
            logger.debug("EngineCore loop active.")

        # Handle any more client requests.
        while not self.input_queue.empty():
            req = self.input_queue.get_nowait()
            self._handle_client_request(*req)

    def _process_engine_step(self) -> bool:
        """Called only when there are unfinished local requests."""

        # Step the engine core.
        outputs, model_executed = self.step_fn()
        # Put EngineCoreOutputs into the output queue.
        for output in (outputs.items() if outputs else ()):
            self.output_queue.put_nowait(output)
        # Post-step hook.
        self.post_step(model_executed)

        return model_executed

    def _handle_client_request(self, request_type: EngineCoreRequestType,
                               request: Any) -> None:
        """Dispatch a request from the client."""

        if request_type == EngineCoreRequestType.ADD:
            req, request_wave = request
            self.add_request(req, request_wave)
        elif request_type == EngineCoreRequestType.ABORT:
            self.abort_requests(request)
        elif request_type == EngineCoreRequestType.UTILITY:
            client_idx, call_id, method_name, args = request
            output = UtilityOutput(call_id)
            try:
                method = getattr(self, method_name)
                result = method(*self._convert_msgspec_args(method, args))
                output.result = UtilityResult(result)
            except BaseException as e:
                logger.exception("Invocation of %s method failed", method_name)
                output.failure_message = (f"Call to {method_name} method"
                                          f" failed: {str(e)}")
            self.output_queue.put_nowait(
                (client_idx, EngineCoreOutputs(utility_output=output)))
        elif request_type == EngineCoreRequestType.EXECUTOR_FAILED:
            raise RuntimeError("Executor failed.")
        else:
            logger.error("Unrecognized input request type encountered: %s",
                         request_type)

    @staticmethod
    def _convert_msgspec_args(method, args):
        """If a provided arg type doesn't match the corresponding target
        method arg type, try converting it to a msgspec object."""
        if not args:
            return args
        arg_types = signature(method).parameters.values()
        assert len(args) <= len(arg_types)
        return tuple(
            msgspec.convert(v, type=p.annotation) if isclass(p.annotation)
            and issubclass(p.annotation, msgspec.Struct)
            and not isinstance(v, p.annotation) else v
            for v, p in zip(args, arg_types))

    def _send_engine_dead(self):
        """Send EngineDead status to the EngineCoreClient."""

        # Put ENGINE_CORE_DEAD in the queue.
        self.output_queue.put_nowait(EngineCoreProc.ENGINE_CORE_DEAD)

        # Wait until the message is sent by the daemon thread before shutdown.
        self.output_thread.join(timeout=5.0)
        if self.output_thread.is_alive():
            logger.fatal("vLLM shutdown signal from EngineCore failed "
                         "to send. Please report this issue.")

    def process_input_sockets(self, input_addresses: list[str],
                              coord_input_address: Optional[str],
                              identity: bytes, ready_event: threading.Event):
        """Input socket IO thread."""

        # Msgpack serialization decoding.
        add_request_decoder = MsgpackDecoder(EngineCoreRequest)
        generic_decoder = MsgpackDecoder()

        with ExitStack() as stack, zmq.Context() as ctx:
            input_sockets = [
                stack.enter_context(
                    make_zmq_socket(ctx,
                                    input_address,
                                    zmq.DEALER,
                                    identity=identity,
                                    bind=False))
                for input_address in input_addresses
            ]
            if coord_input_address is None:
                coord_socket = None
            else:
                coord_socket = stack.enter_context(
                    make_zmq_socket(ctx,
                                    coord_input_address,
                                    zmq.XSUB,
                                    identity=identity,
                                    bind=False))
                # Send subscription message to coordinator.
                coord_socket.send(b'\x01')

            # Register sockets with poller.
            poller = zmq.Poller()
            for input_socket in input_sockets:
                # Send an initial message to each input socket - this is
                # required before the front-end ROUTER socket can send input
                # messages back to us.
                input_socket.send(b'')
                poller.register(input_socket, zmq.POLLIN)

            if coord_socket is not None:
                # Wait for ready message from coordinator.
                assert coord_socket.recv() == b"READY"
                poller.register(coord_socket, zmq.POLLIN)

            ready_event.set()
            del ready_event
            while True:
                for input_socket, _ in poller.poll():
                    # (RequestType, RequestData)
                    type_frame, *data_frames = input_socket.recv_multipart(
                        copy=False)
                    request_type = EngineCoreRequestType(
                        bytes(type_frame.buffer))

                    # Deserialize the request data.
                    if request_type == EngineCoreRequestType.ADD:
                        request = add_request_decoder.decode(data_frames)
                        request = self.preprocess_add_request(request)
                    else:
                        request = generic_decoder.decode(data_frames)

                    # Push to input queue for core busy loop.
                    self.input_queue.put_nowait((request_type, request))

    def process_output_sockets(self, output_paths: list[str],
                               coord_output_path: Optional[str],
                               engine_index: int):
        """Output socket IO thread."""

        # Msgpack serialization encoding.
        encoder = MsgpackEncoder()
        # Send buffers to reuse.
        reuse_buffers: list[bytearray] = []
        # Keep references to outputs and buffers until zmq is finished
        # with them (outputs may contain tensors/np arrays whose
        # backing buffers were extracted for zero-copy send).
        pending = deque[tuple[zmq.MessageTracker, Any, bytearray]]()

        # We must set linger to ensure the ENGINE_CORE_DEAD
        # message is sent prior to closing the socket.
        with ExitStack() as stack, zmq.Context() as ctx:
            sockets = [
                stack.enter_context(
                    make_zmq_socket(ctx, output_path, zmq.PUSH, linger=4000))
                for output_path in output_paths
            ]
            coord_socket = stack.enter_context(
                make_zmq_socket(
                    ctx, coord_output_path, zmq.PUSH, bind=False,
                    linger=4000)) if coord_output_path is not None else None
            max_reuse_bufs = len(sockets) + 1

            while True:
                output = self.output_queue.get()
                if output == EngineCoreProc.ENGINE_CORE_DEAD:
                    for socket in sockets:
                        socket.send(output)
                    break
                assert not isinstance(output, bytes)
                client_index, outputs = output
                outputs.engine_index = engine_index

                if client_index == -1:
                    # Don't reuse the buffer for the coordinator message,
                    # which will be very small.
                    assert coord_socket is not None
                    coord_socket.send_multipart(encoder.encode(outputs))
                    continue

                # Reclaim buffers that zmq is finished with.
                while pending and pending[-1][0].done:
                    reuse_buffers.append(pending.pop()[2])

                buffer = reuse_buffers.pop() if reuse_buffers else bytearray()
                buffers = encoder.encode_into(outputs, buffer)
                tracker = sockets[client_index].send_multipart(buffers,
                                                               copy=False,
                                                               track=True)
                if not tracker.done:
                    ref = outputs if len(buffers) > 1 else None
                    pending.appendleft((tracker, ref, buffer))
                elif len(reuse_buffers) < max_reuse_bufs:
                    # Limit the number of buffers to reuse.
                    reuse_buffers.append(buffer)

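# Illustrative sketch (not part of the original core.py): the zero-copy send
# pattern used by process_output_sockets above, shown in isolation. With
# pyzmq, send_multipart(..., copy=False, track=True) returns a MessageTracker;
# the backing buffer must stay alive until the tracker reports completion.
def _zero_copy_send_example(sock: zmq.Socket, frames, buffer: bytearray,
                            pending: deque, reuse: list[bytearray]) -> None:
    # Reclaim buffers from earlier sends that zmq has finished with.
    while pending and pending[-1][0].done:
        reuse.append(pending.pop()[1])
    tracker = sock.send_multipart(frames, copy=False, track=True)
    if not tracker.done:
        pending.appendleft((tracker, buffer))  # still in flight, keep alive
    else:
        reuse.append(buffer)  # send completed synchronously, recycle now
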
class DPEngineCoreProc(EngineCoreProc):
    """ZMQ-wrapper for running EngineCore in a background process
    in a data parallel context."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        handshake_address: str,
        executor_class: type[Executor],
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
    ):
        # Counts forward passes of the model so that we can synchronize
        # finished requests with DP peers every N steps.
        self.step_counter = 0
        self.current_wave = 0
        self.last_counts = (0, 0)

        # Initialize the engine.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        super().__init__(vllm_config, local_client, handshake_address,
                         executor_class, log_stats, client_handshake_address,
                         dp_rank)

    def _init_data_parallel(self, vllm_config: VllmConfig):

        # Configure GPUs and the stateless process group for data parallel.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        dp_size = vllm_config.parallel_config.data_parallel_size
        local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local

        assert dp_size > 1
        assert 0 <= local_dp_rank <= dp_rank < dp_size

        if vllm_config.kv_transfer_config is not None:
            # Modify the engine_id and append the local_dp_rank to it to
            # ensure that the kv_transfer_config is unique for each DP rank.
            vllm_config.kv_transfer_config.engine_id = (
                f"{vllm_config.kv_transfer_config.engine_id}_dp{local_dp_rank}"
            )
            logger.debug("Setting kv_transfer_config.engine_id to %s",
                         vllm_config.kv_transfer_config.engine_id)

        self.dp_rank = dp_rank
        self.dp_group = vllm_config.parallel_config.stateless_init_dp_group()

    def shutdown(self):
        super().shutdown()
        if dp_group := getattr(self, "dp_group", None):
            stateless_destroy_torch_distributed_process_group(dp_group)

    def add_request(self, request: Request, request_wave: int = 0):
        if self.has_coordinator and request_wave != self.current_wave:
            if request_wave > self.current_wave:
                self.current_wave = request_wave
            elif not self.engines_running:
                # Request received for an already-completed wave; notify
                # the front-end that we need to start the next one.
                self.output_queue.put_nowait(
                    (-1, EngineCoreOutputs(start_wave=self.current_wave)))

        super().add_request(request, request_wave)

    def _handle_client_request(self, request_type: EngineCoreRequestType,
                               request: Any) -> None:
        if request_type == EngineCoreRequestType.START_DP_WAVE:
            new_wave, exclude_eng_index = request
            if exclude_eng_index != self.engine_index and (
                    new_wave >= self.current_wave):
                self.current_wave = new_wave
                if not self.engines_running:
                    logger.debug("EngineCore starting idle loop for wave %d.",
                                 new_wave)
                    self.engines_running = True
        else:
            super()._handle_client_request(request_type, request)

    def _maybe_publish_request_counts(self):
        if not self.publish_dp_lb_stats:
            return

        # Publish our request counts (if they've changed).
        counts = self.scheduler.get_request_counts()
        if counts != self.last_counts:
            self.last_counts = counts
            stats = SchedulerStats(*counts,
                                   step_counter=self.step_counter,
                                   current_wave=self.current_wave)
            self.output_queue.put_nowait(
                (-1, EngineCoreOutputs(scheduler_stats=stats)))

    def run_busy_loop(self):
        """Core busy loop of the EngineCore for the data parallel case."""

        # Loop until the process is sent a SIGINT or SIGTERM.
        while True:
            # 1) Poll the input queue until there is work to do.
            self._process_input_queue()

            # 2) Step the engine core.
            executed = self._process_engine_step()
            self._maybe_publish_request_counts()

            local_unfinished_reqs = self.scheduler.has_unfinished_requests()
            if not executed:
                if not local_unfinished_reqs and not self.engines_running:
                    # All engines are idle.
                    continue

                # We are in a running state and so must execute a dummy pass
                # if the model didn't execute any ready requests.
                self.execute_dummy_batch()

            # 3) All-reduce operation to determine global unfinished reqs.
            self.engines_running = self._has_global_unfinished_reqs(
                local_unfinished_reqs)

            if not self.engines_running:
                if self.dp_rank == 0 or not self.has_coordinator:
                    # Notify client that we are pausing the loop.
                    logger.debug("Wave %d finished, pausing engine loop.",
                                 self.current_wave)
                    # In the coordinator case, dp rank 0 sends updates to the
                    # coordinator. Otherwise (offline spmd case), each rank
                    # sends the update to its colocated front-end process.
                    client_index = -1 if self.has_coordinator else 0
                    self.output_queue.put_nowait(
                        (client_index,
                         EngineCoreOutputs(wave_complete=self.current_wave)))
                # Increment the wave count and reset the step counter.
                self.current_wave += 1
                self.step_counter = 0

    def _has_global_unfinished_reqs(self, local_unfinished: bool) -> bool:

        # Optimization - only perform the finish-sync all-reduce every 32
        # steps.
        self.step_counter += 1
        if self.step_counter % 32 != 0:
            return True

        return ParallelConfig.has_unfinished_dp(self.dp_group,
                                                local_unfinished)

    def reinitialize_distributed(
            self, reconfig_request: ReconfigureDistributedRequest) -> None:
        stateless_destroy_torch_distributed_process_group(self.dp_group)
        self.shutdown()

        parallel_config = self.vllm_config.parallel_config
        old_dp_size = parallel_config.data_parallel_size
        parallel_config.data_parallel_size = \
            reconfig_request.new_data_parallel_size
        if reconfig_request.new_data_parallel_rank != -1:
            parallel_config.data_parallel_rank = \
                reconfig_request.new_data_parallel_rank
        # The local rank specifies device visibility; it should not be
        # changed.
        assert reconfig_request.new_data_parallel_rank_local == \
            ReconfigureRankType.KEEP_CURRENT_RANK
        parallel_config.data_parallel_master_ip = \
            reconfig_request.new_data_parallel_master_ip
        parallel_config.data_parallel_master_port = \
            reconfig_request.new_data_parallel_master_port
        if reconfig_request.new_data_parallel_rank != -2:
            self.dp_rank = parallel_config.data_parallel_rank
            self.dp_group = parallel_config.stateless_init_dp_group()
            reconfig_request.new_data_parallel_master_port = \
                parallel_config.data_parallel_master_port

        self.model_executor.reinitialize_distributed(reconfig_request)
        if reconfig_request.new_data_parallel_size > old_dp_size:
            assert self.available_gpu_memory_for_kv_cache > 0
            # Pass available_gpu_memory_for_kv_cache from existing
            # engine-cores to new engine-cores so they can directly
            # use it in _initialize_kv_caches() rather than profiling.
            ParallelConfig.sync_kv_cache_memory_size(
                self.dp_group, self.available_gpu_memory_for_kv_cache)
            # NOTE(yongji): newly joined workers require dummy_run even if
            # CUDA graph is not used.
            self.model_executor.collective_rpc("compile_or_warm_up_model")
        if reconfig_request.new_data_parallel_rank == \
                ReconfigureRankType.SHUTDOWN_CURRENT_RANK:
            self.shutdown()
            logger.info("DPEngineCoreProc %s shutdown", self.dp_rank)
        else:
            logger.info("Distributed environment reinitialized for DP rank %s",
                        self.dp_rank)

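# Illustrative sketch (not part of the original core.py, and it assumes a
# regular torch.distributed group rather than the stateless DP group used
# above): the "does any DP rank still have unfinished requests?" check that
# _has_global_unfinished_reqs delegates to ParallelConfig.has_unfinished_dp.
def _any_rank_unfinished_example(local_unfinished: bool, group=None) -> bool:
    import torch
    import torch.distributed as dist
    flag = torch.tensor([1 if local_unfinished else 0], dtype=torch.int32)
    dist.all_reduce(flag, op=dist.ReduceOp.MAX, group=group)
    return bool(flag.item())
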
class DPEngineCoreActor(DPEngineCoreProc):
    """
    Ray actor for running EngineCore in a data parallel context.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        addresses: EngineZmqAddresses,
        executor_class: type[Executor],
        log_stats: bool,
        dp_rank: int = 0,
        local_dp_rank: int = 0,
    ):
        self.addresses = addresses
        vllm_config.parallel_config.data_parallel_rank = dp_rank
        vllm_config.parallel_config.data_parallel_rank_local = \
            local_dp_rank

        # Set CUDA_VISIBLE_DEVICES as early as possible in the actor's life
        # cycle.
        # NOTE: in MP we set CUDA_VISIBLE_DEVICES at process creation time,
        # and this cannot be done in the same way for Ray because:
        # 1) Ray manages the life cycle of all ray workers (including
        #    DPEngineCoreActor)
        # 2) Ray sets CUDA_VISIBLE_DEVICES based on the num_gpus configuration
        # To bypass 2, we need to also set
        # RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES, but vLLM workers created
        # thereafter would have CUDA_VISIBLE_DEVICES set, which is sticky:
        # https://github.com/ray-project/ray/blob/e752fc319ddedd9779a0989b6d3613909bad75c9/python/ray/_private/worker.py#L456 # noqa: E501
        # This is problematic because when the vLLM worker (a Ray actor)
        # executes a task, it indexes into the sticky CUDA_VISIBLE_DEVICES
        # rather than directly using the GPU ID, potentially resulting in an
        # index out of bounds error. See:
        # https://github.com/ray-project/ray/pull/40461/files#diff-31e8159767361e4bc259b6d9883d9c0d5e5db780fcea4a52ead4ee3ee4a59a78R1860 # noqa: E501
        # and get_accelerator_ids_for_accelerator_resource() in worker.py
        # of ray.
        self._set_visible_devices(vllm_config, local_dp_rank)

        super().__init__(vllm_config, local_client, "", executor_class,
                         log_stats)

    def _set_visible_devices(self, vllm_config: VllmConfig,
                             local_dp_rank: int):
        from vllm.platforms import current_platform
        if current_platform.is_xpu():
            pass
        else:
            device_control_env_var = current_platform.device_control_env_var
            self._set_cuda_visible_devices(vllm_config, local_dp_rank,
                                           device_control_env_var)

    def _set_cuda_visible_devices(self, vllm_config: VllmConfig,
                                  local_dp_rank: int,
                                  device_control_env_var: str):
        world_size = vllm_config.parallel_config.world_size
        # Set CUDA_VISIBLE_DEVICES or equivalent.
        try:
            value = get_device_indices(device_control_env_var, local_dp_rank,
                                       world_size)
            os.environ[device_control_env_var] = value
        except IndexError as e:
            raise Exception(
                f"Error setting {device_control_env_var}: "
                f"local range: [{local_dp_rank * world_size}, "
                f"{(local_dp_rank + 1) * world_size}) "
                f"base value: \"{os.getenv(device_control_env_var)}\"") from e

    @contextmanager
    def _perform_handshakes(self, handshake_address: str, identity: bytes,
                            local_client: bool, vllm_config: VllmConfig,
                            client_handshake_address: Optional[str]):
        """
        For Ray, we don't need to actually perform the handshake.
        All address information is known before actor creation,
        so we simply yield these addresses.
        """
        yield self.addresses

    def wait_for_init(self):
        """
        Wait until the engine core is initialized.

        This is just an empty method. When ray.get() on this method
        (or any other method of the actor) returns, it is guaranteed
        that actor creation (i.e., __init__) is complete.
        """
        pass

    def run(self):
        """
        Run the engine core busy loop.
        """
        try:
            self.run_busy_loop()
        except SystemExit:
            logger.debug("EngineCore exiting.")
            raise
        except Exception:
            logger.exception("EngineCore encountered a fatal error.")
            raise
        finally:
            self.shutdown()
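
# Illustrative sketch (not part of the original core.py): how the
# EngineCoreProc.run_engine_core entry point above is typically launched in a
# separate process. The handshake_address and the local_client/log_stats
# values here are placeholders; in practice vLLM's engine core client
# constructs and supervises this process.
def _launch_engine_core_example(vllm_config, executor_class,
                                handshake_address: str):
    from multiprocessing import get_context
    proc = get_context("spawn").Process(
        target=EngineCoreProc.run_engine_core,
        kwargs=dict(
            vllm_config=vllm_config,
            local_client=True,
            handshake_address=handshake_address,
            executor_class=executor_class,
            log_stats=False,
        ),
    )
    proc.start()
    return proc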