vllm_cpu-0.12.0-cp313-cp313-manylinux_2_17_aarch64.whl
This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
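Since a wheel is an ordinary zip archive, the file listing below can be reproduced locally once the wheel has been downloaded. A minimal sketch using only the Python standard library (the filename is taken from this diff and is assumed to sit in the current working directory):

```python
# Minimal sketch: enumerate the files packaged in the wheel.
# Assumes the wheel named in this diff has already been downloaded
# into the working directory; wheels are plain zip archives, so the
# standard-library zipfile module is sufficient.
from zipfile import ZipFile

WHEEL = "vllm_cpu-0.12.0-cp313-cp313-manylinux_2_17_aarch64.whl"  # filename from this diff

with ZipFile(WHEEL) as wheel:
    for name in sorted(wheel.namelist()):
        print(name)
```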
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +107 -0
- vllm/_aiter_ops.py +1018 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +2925 -0
- vllm/_ipex_ops.py +457 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +59 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +0 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +434 -0
- vllm/attention/backends/registry.py +286 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +975 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +120 -0
- vllm/attention/layers/cross_attention.py +178 -0
- vllm/attention/layers/encoder_only_attention.py +103 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
- vllm/attention/ops/common.py +469 -0
- vllm/attention/ops/flashmla.py +251 -0
- vllm/attention/ops/merge_attn_states.py +47 -0
- vllm/attention/ops/paged_attn.py +51 -0
- vllm/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/attention/ops/prefix_prefill.py +814 -0
- vllm/attention/ops/rocm_aiter_mla_sparse.py +210 -0
- vllm/attention/ops/triton_decode_attention.py +712 -0
- vllm/attention/ops/triton_merge_attn_states.py +116 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
- vllm/attention/ops/triton_unified_attention.py +941 -0
- vllm/attention/ops/vit_attn_wrappers.py +136 -0
- vllm/attention/selector.py +268 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +117 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3222 -0
- vllm/benchmarks/latency.py +172 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/serve.py +1531 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +41 -0
- vllm/benchmarks/sweep/param_sweep.py +91 -0
- vllm/benchmarks/sweep/plot.py +580 -0
- vllm/benchmarks/sweep/plot_pareto.py +393 -0
- vllm/benchmarks/sweep/serve.py +448 -0
- vllm/benchmarks/sweep/serve_sla.py +492 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +132 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +799 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +209 -0
- vllm/compilation/backends.py +827 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +180 -0
- vllm/compilation/collective_fusion.py +1234 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +48 -0
- vllm/compilation/cuda_graph.py +208 -0
- vllm/compilation/decorators.py +614 -0
- vllm/compilation/fix_functionalization.py +253 -0
- vllm/compilation/fusion.py +374 -0
- vllm/compilation/fusion_attn.py +359 -0
- vllm/compilation/fx_utils.py +91 -0
- vllm/compilation/inductor_pass.py +133 -0
- vllm/compilation/matcher_utils.py +315 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +134 -0
- vllm/compilation/partition_rules.py +72 -0
- vllm/compilation/pass_manager.py +136 -0
- vllm/compilation/piecewise_backend.py +121 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +238 -0
- vllm/compilation/sequence_parallelism.py +363 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +173 -0
- vllm/compilation/wrapper.py +260 -0
- vllm/config/__init__.py +102 -0
- vllm/config/cache.py +220 -0
- vllm/config/compilation.py +1154 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +114 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +96 -0
- vllm/config/model.py +2274 -0
- vllm/config/multimodal.py +247 -0
- vllm/config/observability.py +131 -0
- vllm/config/parallel.py +653 -0
- vllm/config/pooler.py +124 -0
- vllm/config/scheduler.py +297 -0
- vllm/config/speculative.py +643 -0
- vllm/config/speech_to_text.py +38 -0
- vllm/config/structured_outputs.py +94 -0
- vllm/config/utils.py +324 -0
- vllm/config/vllm.py +1353 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +327 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +490 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +297 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +340 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +733 -0
- vllm/distributed/device_communicators/shm_object_storage.py +697 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/tpu_communicator.py +99 -0
- vllm/distributed/device_communicators/xpu_communicator.py +95 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
- vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/async_worker.py +115 -0
- vllm/distributed/eplb/eplb_state.py +1154 -0
- vllm/distributed/eplb/rebalance_algo.py +260 -0
- vllm/distributed/eplb/rebalance_execute.py +532 -0
- vllm/distributed/kv_events.py +371 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +575 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +378 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +895 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2480 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +538 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1790 -0
- vllm/distributed/tpu_distributed_utils.py +188 -0
- vllm/distributed/utils.py +545 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2106 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +188 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +460 -0
- vllm/entrypoints/api_server.py +184 -0
- vllm/entrypoints/chat_utils.py +1837 -0
- vllm/entrypoints/cli/__init__.py +13 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +56 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +256 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +249 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +572 -0
- vllm/entrypoints/dynamic_lora.py +57 -0
- vllm/entrypoints/harmony_utils.py +535 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1762 -0
- vllm/entrypoints/logger.py +84 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1891 -0
- vllm/entrypoints/openai/cli_args.py +302 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/protocol.py +2465 -0
- vllm/entrypoints/openai/run_batch.py +631 -0
- vllm/entrypoints/openai/serving_chat.py +1782 -0
- vllm/entrypoints/openai/serving_completion.py +716 -0
- vllm/entrypoints/openai/serving_engine.py +1478 -0
- vllm/entrypoints/openai/serving_models.py +304 -0
- vllm/entrypoints/openai/serving_responses.py +2032 -0
- vllm/entrypoints/openai/serving_tokenization.py +203 -0
- vllm/entrypoints/openai/serving_tokens.py +281 -0
- vllm/entrypoints/openai/serving_transcription.py +168 -0
- vllm/entrypoints/openai/speech_to_text.py +559 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +322 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +324 -0
- vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/entrypoints/openai/utils.py +49 -0
- vllm/entrypoints/pooling/__init__.py +16 -0
- vllm/entrypoints/pooling/classify/__init__.py +0 -0
- vllm/entrypoints/pooling/classify/api_router.py +50 -0
- vllm/entrypoints/pooling/classify/protocol.py +181 -0
- vllm/entrypoints/pooling/classify/serving.py +237 -0
- vllm/entrypoints/pooling/embed/__init__.py +0 -0
- vllm/entrypoints/pooling/embed/api_router.py +67 -0
- vllm/entrypoints/pooling/embed/protocol.py +208 -0
- vllm/entrypoints/pooling/embed/serving.py +697 -0
- vllm/entrypoints/pooling/pooling/__init__.py +0 -0
- vllm/entrypoints/pooling/pooling/api_router.py +63 -0
- vllm/entrypoints/pooling/pooling/protocol.py +148 -0
- vllm/entrypoints/pooling/pooling/serving.py +348 -0
- vllm/entrypoints/pooling/score/__init__.py +0 -0
- vllm/entrypoints/pooling/score/api_router.py +149 -0
- vllm/entrypoints/pooling/score/protocol.py +145 -0
- vllm/entrypoints/pooling/score/serving.py +505 -0
- vllm/entrypoints/renderer.py +409 -0
- vllm/entrypoints/responses_utils.py +148 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +118 -0
- vllm/entrypoints/score_utils.py +240 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +143 -0
- vllm/entrypoints/tool_server.py +234 -0
- vllm/entrypoints/utils.py +319 -0
- vllm/env_override.py +378 -0
- vllm/envs.py +1710 -0
- vllm/forward_context.py +358 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +137 -0
- vllm/inputs/preprocess.py +716 -0
- vllm/logger.py +298 -0
- vllm/logging_utils/__init__.py +13 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +127 -0
- vllm/logging_utils/lazy.py +20 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +206 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +42 -0
- vllm/lora/layers/base.py +66 -0
- vllm/lora/layers/base_linear.py +165 -0
- vllm/lora/layers/column_parallel_linear.py +577 -0
- vllm/lora/layers/fused_moe.py +747 -0
- vllm/lora/layers/logits_processor.py +203 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +176 -0
- vllm/lora/layers/utils.py +74 -0
- vllm/lora/layers/vocal_parallel_embedding.py +140 -0
- vllm/lora/lora_weights.py +227 -0
- vllm/lora/models.py +903 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +661 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +295 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +141 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +493 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +412 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_tpu.py +358 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +100 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +306 -0
- vllm/lora/worker_manager.py +268 -0
- vllm/model_executor/__init__.py +11 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +595 -0
- vllm/model_executor/layers/attention_layer_base.py +32 -0
- vllm/model_executor/layers/batch_invariant.py +1058 -0
- vllm/model_executor/layers/conv.py +256 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +110 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +171 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/config.py +938 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +292 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +434 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +376 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +821 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2172 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +121 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +136 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +524 -0
- vllm/model_executor/layers/fused_moe/layer.py +2152 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1332 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +78 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +559 -0
- vllm/model_executor/layers/fused_moe/utils.py +332 -0
- vllm/model_executor/layers/kda.py +442 -0
- vllm/model_executor/layers/layernorm.py +442 -0
- vllm/model_executor/layers/lightning_attn.py +735 -0
- vllm/model_executor/layers/linear.py +1424 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +68 -0
- vllm/model_executor/layers/mamba/linear_attn.py +388 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +527 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +930 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +255 -0
- vllm/model_executor/layers/mla.py +176 -0
- vllm/model_executor/layers/pooler.py +817 -0
- vllm/model_executor/layers/quantization/__init__.py +179 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +718 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +644 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +963 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2387 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/cpu_wna16.py +625 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +225 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1348 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +687 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +842 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +470 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +146 -0
- vllm/model_executor/layers/quantization/modelopt.py +1637 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +528 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1175 -0
- vllm/model_executor/layers/quantization/petit.py +319 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +136 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +527 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +653 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +639 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +333 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +311 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +674 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +452 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +183 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +292 -0
- vllm/model_executor/layers/rotary_embedding/base.py +240 -0
- vllm/model_executor/layers/rotary_embedding/common.py +188 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/xdrope.py +102 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
- vllm/model_executor/model_loader/__init__.py +150 -0
- vllm/model_executor/model_loader/base_loader.py +57 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
- vllm/model_executor/model_loader/default_loader.py +321 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +349 -0
- vllm/model_executor/model_loader/online_quantization.py +275 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
- vllm/model_executor/model_loader/tensorizer.py +790 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/tpu.py +118 -0
- vllm/model_executor/model_loader/utils.py +296 -0
- vllm/model_executor/model_loader/weight_utils.py +1147 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +543 -0
- vllm/model_executor/models/afmoe.py +697 -0
- vllm/model_executor/models/aimv2.py +248 -0
- vllm/model_executor/models/apertus.py +569 -0
- vllm/model_executor/models/arcee.py +428 -0
- vllm/model_executor/models/arctic.py +634 -0
- vllm/model_executor/models/aria.py +655 -0
- vllm/model_executor/models/aya_vision.py +450 -0
- vllm/model_executor/models/baichuan.py +494 -0
- vllm/model_executor/models/bailing_moe.py +645 -0
- vllm/model_executor/models/bamba.py +516 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +925 -0
- vllm/model_executor/models/bert_with_rope.py +732 -0
- vllm/model_executor/models/blip.py +350 -0
- vllm/model_executor/models/blip2.py +695 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1098 -0
- vllm/model_executor/models/chatglm.py +499 -0
- vllm/model_executor/models/clip.py +1005 -0
- vllm/model_executor/models/cohere2_vision.py +472 -0
- vllm/model_executor/models/commandr.py +470 -0
- vllm/model_executor/models/config.py +510 -0
- vllm/model_executor/models/dbrx.py +485 -0
- vllm/model_executor/models/deepencoder.py +676 -0
- vllm/model_executor/models/deepseek_eagle.py +252 -0
- vllm/model_executor/models/deepseek_mtp.py +446 -0
- vllm/model_executor/models/deepseek_ocr.py +593 -0
- vllm/model_executor/models/deepseek_v2.py +1715 -0
- vllm/model_executor/models/deepseek_vl2.py +644 -0
- vllm/model_executor/models/dots1.py +566 -0
- vllm/model_executor/models/dots_ocr.py +874 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +755 -0
- vllm/model_executor/models/ernie45_vl.py +1710 -0
- vllm/model_executor/models/ernie45_vl_moe.py +800 -0
- vllm/model_executor/models/ernie_mtp.py +279 -0
- vllm/model_executor/models/exaone.py +525 -0
- vllm/model_executor/models/exaone4.py +517 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +544 -0
- vllm/model_executor/models/falcon_h1.py +680 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +373 -0
- vllm/model_executor/models/gemma.py +426 -0
- vllm/model_executor/models/gemma2.py +436 -0
- vllm/model_executor/models/gemma3.py +577 -0
- vllm/model_executor/models/gemma3_mm.py +665 -0
- vllm/model_executor/models/gemma3n.py +1167 -0
- vllm/model_executor/models/gemma3n_mm.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +298 -0
- vllm/model_executor/models/glm4_1v.py +1854 -0
- vllm/model_executor/models/glm4_moe.py +738 -0
- vllm/model_executor/models/glm4_moe_mtp.py +359 -0
- vllm/model_executor/models/glm4v.py +785 -0
- vllm/model_executor/models/gpt2.py +397 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +345 -0
- vllm/model_executor/models/gpt_neox.py +343 -0
- vllm/model_executor/models/gpt_oss.py +745 -0
- vllm/model_executor/models/granite.py +476 -0
- vllm/model_executor/models/granite_speech.py +913 -0
- vllm/model_executor/models/granitemoe.py +561 -0
- vllm/model_executor/models/granitemoehybrid.py +704 -0
- vllm/model_executor/models/granitemoeshared.py +328 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +555 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1042 -0
- vllm/model_executor/models/hunyuan_vision.py +1028 -0
- vllm/model_executor/models/hyperclovax_vision.py +1166 -0
- vllm/model_executor/models/idefics2_vision_model.py +427 -0
- vllm/model_executor/models/idefics3.py +718 -0
- vllm/model_executor/models/interfaces.py +1148 -0
- vllm/model_executor/models/interfaces_base.py +243 -0
- vllm/model_executor/models/intern_vit.py +454 -0
- vllm/model_executor/models/internlm2.py +454 -0
- vllm/model_executor/models/internlm2_ve.py +139 -0
- vllm/model_executor/models/interns1.py +830 -0
- vllm/model_executor/models/interns1_vit.py +433 -0
- vllm/model_executor/models/internvl.py +1452 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jamba.py +609 -0
- vllm/model_executor/models/jina_vl.py +147 -0
- vllm/model_executor/models/keye.py +1765 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +658 -0
- vllm/model_executor/models/kimi_vl.py +578 -0
- vllm/model_executor/models/lfm2.py +516 -0
- vllm/model_executor/models/lfm2_moe.py +746 -0
- vllm/model_executor/models/lightonocr.py +195 -0
- vllm/model_executor/models/llama.py +704 -0
- vllm/model_executor/models/llama4.py +857 -0
- vllm/model_executor/models/llama4_eagle.py +216 -0
- vllm/model_executor/models/llama_eagle.py +213 -0
- vllm/model_executor/models/llama_eagle3.py +375 -0
- vllm/model_executor/models/llava.py +842 -0
- vllm/model_executor/models/llava_next.py +583 -0
- vllm/model_executor/models/llava_next_video.py +467 -0
- vllm/model_executor/models/llava_onevision.py +923 -0
- vllm/model_executor/models/longcat_flash.py +743 -0
- vllm/model_executor/models/longcat_flash_mtp.py +349 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +288 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +828 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/minicpm.py +657 -0
- vllm/model_executor/models/minicpm3.py +234 -0
- vllm/model_executor/models/minicpm_eagle.py +385 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1744 -0
- vllm/model_executor/models/minimax_m2.py +546 -0
- vllm/model_executor/models/minimax_text_01.py +1010 -0
- vllm/model_executor/models/minimax_vl_01.py +396 -0
- vllm/model_executor/models/mistral3.py +637 -0
- vllm/model_executor/models/mistral_large_3.py +63 -0
- vllm/model_executor/models/mistral_large_3_eagle.py +165 -0
- vllm/model_executor/models/mixtral.py +599 -0
- vllm/model_executor/models/mllama4.py +1151 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +452 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1553 -0
- vllm/model_executor/models/moonvit.py +686 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1732 -0
- vllm/model_executor/models/nemotron.py +502 -0
- vllm/model_executor/models/nemotron_h.py +850 -0
- vllm/model_executor/models/nemotron_nas.py +473 -0
- vllm/model_executor/models/nemotron_vl.py +653 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +413 -0
- vllm/model_executor/models/olmo2.py +455 -0
- vllm/model_executor/models/olmoe.py +494 -0
- vllm/model_executor/models/opencua.py +271 -0
- vllm/model_executor/models/openpangu.py +1051 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +366 -0
- vllm/model_executor/models/ouro.py +508 -0
- vllm/model_executor/models/ovis.py +559 -0
- vllm/model_executor/models/ovis2_5.py +673 -0
- vllm/model_executor/models/paddleocr_vl.py +1380 -0
- vllm/model_executor/models/paligemma.py +412 -0
- vllm/model_executor/models/persimmon.py +376 -0
- vllm/model_executor/models/phi.py +370 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +737 -0
- vllm/model_executor/models/phi4_multimodal.py +1447 -0
- vllm/model_executor/models/phi4mm.py +1253 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +670 -0
- vllm/model_executor/models/pixtral.py +1380 -0
- vllm/model_executor/models/plamo2.py +966 -0
- vllm/model_executor/models/plamo3.py +441 -0
- vllm/model_executor/models/qwen.py +363 -0
- vllm/model_executor/models/qwen2.py +569 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1220 -0
- vllm/model_executor/models/qwen2_5_vl.py +1594 -0
- vllm/model_executor/models/qwen2_audio.py +473 -0
- vllm/model_executor/models/qwen2_moe.py +590 -0
- vllm/model_executor/models/qwen2_rm.py +123 -0
- vllm/model_executor/models/qwen2_vl.py +1593 -0
- vllm/model_executor/models/qwen3.py +332 -0
- vllm/model_executor/models/qwen3_moe.py +738 -0
- vllm/model_executor/models/qwen3_next.py +1390 -0
- vllm/model_executor/models/qwen3_next_mtp.py +296 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1765 -0
- vllm/model_executor/models/qwen3_vl.py +1686 -0
- vllm/model_executor/models/qwen3_vl_moe.py +470 -0
- vllm/model_executor/models/qwen_vl.py +803 -0
- vllm/model_executor/models/radio.py +555 -0
- vllm/model_executor/models/registry.py +1183 -0
- vllm/model_executor/models/roberta.py +259 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +493 -0
- vllm/model_executor/models/siglip.py +1245 -0
- vllm/model_executor/models/siglip2navit.py +723 -0
- vllm/model_executor/models/skyworkr1v.py +953 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +485 -0
- vllm/model_executor/models/stablelm.py +359 -0
- vllm/model_executor/models/starcoder2.py +366 -0
- vllm/model_executor/models/step3_text.py +555 -0
- vllm/model_executor/models/step3_vl.py +1149 -0
- vllm/model_executor/models/swin.py +514 -0
- vllm/model_executor/models/tarsier.py +619 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +319 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +464 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +325 -0
- vllm/model_executor/models/transformers/multimodal.py +411 -0
- vllm/model_executor/models/transformers/pooling.py +119 -0
- vllm/model_executor/models/transformers/utils.py +213 -0
- vllm/model_executor/models/ultravox.py +686 -0
- vllm/model_executor/models/utils.py +832 -0
- vllm/model_executor/models/vision.py +552 -0
- vllm/model_executor/models/voxtral.py +842 -0
- vllm/model_executor/models/whisper.py +963 -0
- vllm/model_executor/models/zamba2.py +980 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +94 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
- vllm/model_executor/warmup/kernel_warmup.py +98 -0
- vllm/multimodal/__init__.py +40 -0
- vllm/multimodal/audio.py +142 -0
- vllm/multimodal/base.py +26 -0
- vllm/multimodal/cache.py +830 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +106 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +1036 -0
- vllm/multimodal/parse.py +544 -0
- vllm/multimodal/processing.py +2240 -0
- vllm/multimodal/profiling.py +369 -0
- vllm/multimodal/registry.py +357 -0
- vllm/multimodal/utils.py +523 -0
- vllm/multimodal/video.py +333 -0
- vllm/outputs.py +345 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +410 -0
- vllm/platforms/cuda.py +642 -0
- vllm/platforms/interface.py +656 -0
- vllm/platforms/rocm.py +513 -0
- vllm/platforms/tpu.py +275 -0
- vllm/platforms/xpu.py +261 -0
- vllm/plugins/__init__.py +81 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +230 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/gpu_profiler.py +216 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +30 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +92 -0
- vllm/reasoning/abs_reasoning_parsers.py +290 -0
- vllm/reasoning/basic_parsers.py +162 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +58 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
- vllm/reasoning/mistral_reasoning_parser.py +55 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +107 -0
- vllm/sampling_params.py +597 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +98 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tokenizers/__init__.py +24 -0
- vllm/tokenizers/detokenizer_utils.py +198 -0
- vllm/tokenizers/hf.py +124 -0
- vllm/tokenizers/mistral.py +554 -0
- vllm/tokenizers/protocol.py +111 -0
- vllm/tokenizers/registry.py +233 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1081 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +84 -0
- vllm/transformers_utils/configs/afmoe.py +87 -0
- vllm/transformers_utils/configs/arctic.py +216 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +90 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +82 -0
- vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +148 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +163 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +235 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +214 -0
- vllm/transformers_utils/configs/nemotron_h.py +282 -0
- vllm/transformers_utils/configs/olmo3.py +83 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +275 -0
- vllm/transformers_utils/configs/radio.py +89 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +178 -0
- vllm/transformers_utils/configs/ultravox.py +118 -0
- vllm/transformers_utils/dynamic_module.py +59 -0
- vllm/transformers_utils/gguf_utils.py +209 -0
- vllm/transformers_utils/processor.py +423 -0
- vllm/transformers_utils/processors/__init__.py +23 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
- vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/repo_utils.py +287 -0
- vllm/transformers_utils/runai_utils.py +104 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +127 -0
- vllm/transformers_utils/tokenizer_base.py +33 -0
- vllm/transformers_utils/utils.py +184 -0
- vllm/triton_utils/__init__.py +20 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +294 -0
- vllm/utils/__init__.py +66 -0
- vllm/utils/argparse_utils.py +504 -0
- vllm/utils/async_utils.py +310 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +112 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +399 -0
- vllm/utils/flashinfer.py +532 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +151 -0
- vllm/utils/hashing.py +81 -0
- vllm/utils/import_utils.py +449 -0
- vllm/utils/jsontree.py +158 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +232 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +51 -0
- vllm/utils/serial_utils.py +169 -0
- vllm/utils/system_utils.py +265 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +647 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +497 -0
- vllm/v1/attention/backends/flash_attn.py +1050 -0
- vllm/v1/attention/backends/flashinfer.py +1572 -0
- vllm/v1/attention/backends/flex_attention.py +945 -0
- vllm/v1/attention/backends/gdn_attn.py +387 -0
- vllm/v1/attention/backends/linear_attn.py +77 -0
- vllm/v1/attention/backends/mamba1_attn.py +165 -0
- vllm/v1/attention/backends/mamba2_attn.py +354 -0
- vllm/v1/attention/backends/mamba_attn.py +117 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/aiter_triton_mla.py +74 -0
- vllm/v1/attention/backends/mla/common.py +2069 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +340 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +174 -0
- vllm/v1/attention/backends/mla/flashmla.py +317 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +551 -0
- vllm/v1/attention/backends/mla/indexer.py +369 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +275 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +325 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/pallas.py +436 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
- vllm/v1/attention/backends/rocm_attn.py +359 -0
- vllm/v1/attention/backends/short_conv_attn.py +105 -0
- vllm/v1/attention/backends/tree_attn.py +428 -0
- vllm/v1/attention/backends/triton_attn.py +377 -0
- vllm/v1/attention/backends/utils.py +1149 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +466 -0
- vllm/v1/core/encoder_cache_manager.py +343 -0
- vllm/v1/core/kv_cache_coordinator.py +570 -0
- vllm/v1/core/kv_cache_manager.py +408 -0
- vllm/v1/core/kv_cache_metrics.py +96 -0
- vllm/v1/core/kv_cache_utils.py +1471 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +68 -0
- vllm/v1/core/sched/interface.py +187 -0
- vllm/v1/core/sched/output.py +230 -0
- vllm/v1/core/sched/request_queue.py +217 -0
- vllm/v1/core/sched/scheduler.py +1726 -0
- vllm/v1/core/sched/utils.py +72 -0
- vllm/v1/core/single_type_kv_cache_manager.py +801 -0
- vllm/v1/cudagraph_dispatcher.py +183 -0
- vllm/v1/engine/__init__.py +214 -0
- vllm/v1/engine/async_llm.py +874 -0
- vllm/v1/engine/coordinator.py +377 -0
- vllm/v1/engine/core.py +1421 -0
- vllm/v1/engine/core_client.py +1406 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/input_processor.py +636 -0
- vllm/v1/engine/llm_engine.py +416 -0
- vllm/v1/engine/logprobs.py +189 -0
- vllm/v1/engine/output_processor.py +658 -0
- vllm/v1/engine/parallel_sampling.py +145 -0
- vllm/v1/engine/processor.py +20 -0
- vllm/v1/engine/utils.py +1068 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +888 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +626 -0
- vllm/v1/executor/ray_utils.py +465 -0
- vllm/v1/executor/uniproc_executor.py +183 -0
- vllm/v1/kv_cache_interface.py +404 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +86 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +66 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +191 -0
- vllm/v1/kv_offload/worker/worker.py +144 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1268 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +194 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +431 -0
- vllm/v1/outputs.py +237 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +82 -0
- vllm/v1/request.py +280 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +278 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +52 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +384 -0
- vllm/v1/sample/rejection_sampler.py +805 -0
- vllm/v1/sample/sampler.py +319 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +532 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1325 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +225 -0
- vllm/v1/spec_decode/ngram_proposer.py +291 -0
- vllm/v1/spec_decode/suffix_decoding.py +101 -0
- vllm/v1/spec_decode/utils.py +121 -0
- vllm/v1/structured_output/__init__.py +338 -0
- vllm/v1/structured_output/backend_guidance.py +265 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +362 -0
- vllm/v1/structured_output/request.py +94 -0
- vllm/v1/structured_output/utils.py +469 -0
- vllm/v1/utils.py +414 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +343 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +210 -0
- vllm/v1/worker/dp_utils.py +250 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
- vllm/v1/worker/gpu/README.md +4 -0
- vllm/v1/worker/gpu/__init__.py +0 -0
- vllm/v1/worker/gpu/async_utils.py +97 -0
- vllm/v1/worker/gpu/attn_utils.py +189 -0
- vllm/v1/worker/gpu/block_table.py +314 -0
- vllm/v1/worker/gpu/cudagraph_utils.py +259 -0
- vllm/v1/worker/gpu/dp_utils.py +31 -0
- vllm/v1/worker/gpu/input_batch.py +430 -0
- vllm/v1/worker/gpu/model_runner.py +1007 -0
- vllm/v1/worker/gpu/sample/__init__.py +0 -0
- vllm/v1/worker/gpu/sample/gumbel.py +101 -0
- vllm/v1/worker/gpu/sample/logprob.py +167 -0
- vllm/v1/worker/gpu/sample/metadata.py +179 -0
- vllm/v1/worker/gpu/sample/penalties.py +154 -0
- vllm/v1/worker/gpu/sample/sampler.py +75 -0
- vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
- vllm/v1/worker/gpu/spec_decode/eagle.py +565 -0
- vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
- vllm/v1/worker/gpu/spec_decode/rejection_sample.py +83 -0
- vllm/v1/worker/gpu/states.py +309 -0
- vllm/v1/worker/gpu/structured_outputs.py +76 -0
- vllm/v1/worker/gpu_input_batch.py +971 -0
- vllm/v1/worker/gpu_model_runner.py +5360 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +472 -0
- vllm/v1/worker/gpu_worker.py +922 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +309 -0
- vllm/v1/worker/lora_model_runner_mixin.py +212 -0
- vllm/v1/worker/tpu_input_batch.py +583 -0
- vllm/v1/worker/tpu_model_runner.py +2196 -0
- vllm/v1/worker/tpu_worker.py +351 -0
- vllm/v1/worker/ubatch_utils.py +73 -0
- vllm/v1/worker/ubatching.py +231 -0
- vllm/v1/worker/utils.py +365 -0
- vllm/v1/worker/worker_base.py +377 -0
- vllm/v1/worker/xpu_model_runner.py +48 -0
- vllm/v1/worker/xpu_worker.py +198 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu-0.12.0.dist-info/METADATA +300 -0
- vllm_cpu-0.12.0.dist-info/RECORD +1600 -0
- vllm_cpu-0.12.0.dist-info/WHEEL +5 -0
- vllm_cpu-0.12.0.dist-info/entry_points.txt +5 -0
- vllm_cpu-0.12.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1790 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""vLLM distributed state.
It takes over the control of the distributed environment from PyTorch.
The typical workflow is:

- call `init_distributed_environment` to initialize the distributed environment.
- call `initialize_model_parallel` or `ensure_model_parallel_initialized` to
 initialize the model parallel groups.

- any code dealing with the distributed stuff

- call `destroy_model_parallel` to destroy the model parallel groups.
- call `destroy_distributed_environment` to destroy the distributed environment.

If you only need to use the distributed environment without model/pipeline
parallelism, you can skip the model parallel initialization and destruction
steps.
"""

import contextlib
import gc
import pickle
import weakref
from collections import namedtuple
from collections.abc import Callable
from contextlib import contextmanager, nullcontext
from dataclasses import dataclass
from datetime import timedelta
from multiprocessing import shared_memory
from typing import Any, Optional
from unittest.mock import patch

import torch
import torch.distributed
import torch.distributed._functional_collectives as funcol
import torch.distributed._symmetric_memory
from torch.distributed import Backend, ProcessGroup

import vllm.envs as envs
from vllm.distributed.device_communicators.base_device_communicator import (
    DeviceCommunicatorBase,
)
from vllm.distributed.utils import StatelessProcessGroup
from vllm.logger import init_logger
from vllm.utils.import_utils import resolve_obj_by_qualname
from vllm.utils.network_utils import get_distributed_init_method
from vllm.utils.system_utils import suppress_stdout
from vllm.utils.torch_utils import (
    direct_register_custom_op,
    supports_custom_op,
)


@dataclass
class GraphCaptureContext:
    stream: torch.cuda.Stream


TensorMetadata = namedtuple("TensorMetadata", ["device", "dtype", "size"])

def _split_tensor_dict(
    tensor_dict: dict[str, torch.Tensor | Any],
) -> tuple[list[tuple[str, Any]], list[torch.Tensor]]:
    """Split the tensor dictionary into two parts:
    1. A list of (key, value) pairs. If the value is a tensor, it is replaced
         by its metadata.
    2. A list of tensors.
    """
    metadata_list: list[tuple[str, Any]] = []
    tensor_list: list[torch.Tensor] = []
    for key, value in tensor_dict.items():
        if isinstance(value, torch.Tensor):
            # Note: we cannot use `value.device` here,
            # because it contains not only the device type but also the device
            # index (e.g. "cuda:0"). We only need the device type.
            # receiving side will set the device index.
            device = value.device.type
            metadata_list.append(
                (key, TensorMetadata(device, value.dtype, value.size()))
            )
            tensor_list.append(value)
        else:
            metadata_list.append((key, value))
    return metadata_list, tensor_list


_group_name_counter: dict[str, int] = {}


def _get_unique_name(name: str) -> str:
    """Get a unique name for the group.
    Example:
    _get_unique_name("tp") -> "tp:0"
    _get_unique_name("tp") -> "tp:1"
    """
    if name not in _group_name_counter:
        _group_name_counter[name] = 0
    newname = f"{name}:{_group_name_counter[name]}"
    _group_name_counter[name] += 1
    return newname


_groups: dict[str, Callable[[], Optional["GroupCoordinator"]]] = {}


def _register_group(group: "GroupCoordinator") -> None:
    _groups[group.unique_name] = weakref.ref(group)


def all_reduce(tensor: torch.Tensor, group_name: str) -> torch.Tensor:
    assert group_name in _groups, f"Group {group_name} is not found."
    group = _groups[group_name]()
    if group is None:
        raise ValueError(f"Group {group_name} is destroyed.")
    return group._all_reduce_out_place(tensor)


def all_reduce_fake(tensor: torch.Tensor, group_name: str) -> torch.Tensor:
    return torch.empty_like(tensor)

def reduce_scatter(
    tensor: torch.Tensor, dim: int, world_size: int, group_name: str
) -> torch.Tensor:
    assert group_name in _groups, f"Group {group_name} is not found."
    group = _groups[group_name]()
    if group is None:
        raise ValueError(f"Group {group_name} is destroyed.")
    return group._reduce_scatter_out_place(tensor, dim)


def reduce_scatter_fake(
    tensor: torch.Tensor, dim: int, world_size: int, group_name: str
) -> torch.Tensor:
    new_shape = list(tensor.shape)
    new_shape[dim] = tensor.shape[dim] // world_size
    return torch.empty(new_shape, dtype=tensor.dtype, device=tensor.device)


def all_gather(
    tensor: torch.Tensor, dim: int, world_size: int, group_name: str
) -> torch.Tensor:
    assert group_name in _groups, f"Group {group_name} is not found."
    group = _groups[group_name]()
    if group is None:
        raise ValueError(f"Group {group_name} is destroyed.")
    return group._all_gather_out_place(tensor, dim)


def all_gather_fake(
    tensor: torch.Tensor, dim: int, world_size: int, group_name: str
) -> torch.Tensor:
    new_shape = list(tensor.shape)
    new_shape[dim] = tensor.shape[dim] * world_size
    return torch.empty(new_shape, dtype=tensor.dtype, device=tensor.device)


def patched_fused_scaled_matmul_reduce_scatter_fake(
    A: torch.Tensor,
    B: torch.Tensor,
    A_scale: torch.Tensor,
    B_scale: torch.Tensor,
    reduce_op: str,
    orig_scatter_dim: int,
    scatter_dim_after_maybe_reshape: int,
    group_name: str,
    output_shape: list[int],
    bias: torch.Tensor | None = None,
    result_scale: torch.Tensor | None = None,
    out_dtype: torch.dtype | None = None,
    use_fast_accum: bool = False,
) -> torch.Tensor:
    # Copied from
    # https://github.com/pytorch/pytorch/blob/50c338c2da905062449e4d9ac807832d1b5cd90e/torch/distributed/_symmetric_memory/__init__.py#L1189
    if A_scale.numel() > 1:
        if A_scale.shape[:-1] != A.shape[:-1]:
            raise ValueError(
                "For row-wise scaling, the leading dims of A_scale "
                "must match the leading dims of A "
                f"(A shape: {A.shape}, A_scale shape: {A_scale.shape})"
            )
        A_scale = A_scale.flatten(0, -2).contiguous()
    elif A_scale.numel() != 1:
        raise ValueError(
            "Invalid A_scale shape "
            f"(A shape: {A.shape}, A_scale shape: {A_scale.shape})"
        )

    C = torch._scaled_mm(
        A.flatten(0, -2).contiguous(),
        B,
        A_scale,
        B_scale,
        bias,
        result_scale,
        out_dtype,
        use_fast_accum,
    )
    C = C.view(*output_shape[:-1], B.shape[1])
    res = funcol.reduce_scatter_tensor(
        C,
        reduce_op,
        orig_scatter_dim,  # need original scatter dim for 3D+ output tensor here
        group_name,
    )
    res = funcol.wait_tensor(res)
    return res

def patched_fused_scaled_matmul_reduce_scatter(
    A: torch.Tensor,
    B: torch.Tensor,
    A_scale: torch.Tensor,
    B_scale: torch.Tensor,
    reduce_op: str,
    orig_scatter_dim: int,
    scatter_dim_after_maybe_reshape: int,
    group_name: str,
    output_shape: list[int],
    bias: torch.Tensor | None = None,
    result_scale: torch.Tensor | None = None,
    out_dtype: torch.dtype | None = None,
    use_fast_accum: bool = False,
) -> torch.Tensor:
    return torch.ops.symm_mem.fused_scaled_matmul_reduce_scatter(
        A,
        B,
        A_scale,
        B_scale,
        reduce_op,
        orig_scatter_dim,
        scatter_dim_after_maybe_reshape,
        group_name,
        output_shape,
        bias,
        result_scale,
        out_dtype,
        use_fast_accum,
    )


if supports_custom_op():
    direct_register_custom_op(
        op_name="all_reduce",
        op_func=all_reduce,
        fake_impl=all_reduce_fake,
    )

    direct_register_custom_op(
        op_name="reduce_scatter",
        op_func=reduce_scatter,
        fake_impl=reduce_scatter_fake,
    )

    direct_register_custom_op(
        op_name="all_gather",
        op_func=all_gather,
        fake_impl=all_gather_fake,
    )

    # TODO: Remove this once the pytorch fix
    # (https://github.com/pytorch/pytorch/pull/165086) gets released,
    # in either 2.9.1 or 2.10
    direct_register_custom_op(
        op_name="patched_fused_scaled_matmul_reduce_scatter",
        op_func=patched_fused_scaled_matmul_reduce_scatter,
        fake_impl=patched_fused_scaled_matmul_reduce_scatter_fake,
    )

class GroupCoordinator:
    """
    PyTorch ProcessGroup wrapper for a group of processes.
    PyTorch ProcessGroup is bound to one specific communication backend,
        e.g. NCCL, Gloo, MPI, etc.
    GroupCoordinator takes charge of all the communication operations among
        the processes in the group. It manages both CPU and device
        communication.
    """

    # available attributes:
    rank: int  # global rank
    ranks: list[int]  # global ranks in the group
    world_size: int  # size of the group
    # difference between `local_rank` and `rank_in_group`:
    # if we have a group of size 4 across two nodes:
    # Process | Node | Rank | Local Rank | Rank in Group
    #    0    |   0  |   0  |      0     |       0
    #    1    |   0  |   1  |      1     |       1
    #    2    |   1  |   2  |      0     |       2
    #    3    |   1  |   3  |      1     |       3
    local_rank: int  # local rank used to assign devices
    rank_in_group: int  # rank inside the group
    cpu_group: ProcessGroup  # group for CPU communication
    device_group: ProcessGroup  # group for device communication
    # device communicator (if use_device_communicator=True)
    device_communicator: DeviceCommunicatorBase | None
    mq_broadcaster: Any | None  # shared memory broadcaster

    def __init__(
        self,
        group_ranks: list[list[int]],
        local_rank: int,
        torch_distributed_backend: str | Backend,
        use_device_communicator: bool,  # whether to use device communicator
        use_message_queue_broadcaster: bool = False,
        group_name: str | None = None,
    ):
        group_name = group_name or "anonymous"
        self.unique_name = _get_unique_name(group_name)
        _register_group(self)

        self.rank = torch.distributed.get_rank()
        self.local_rank = local_rank

        self_device_group = None
        self_cpu_group = None

        for ranks in group_ranks:
            device_group = torch.distributed.new_group(
                ranks, backend=torch_distributed_backend
            )
            # a group with `gloo` backend, to allow direct coordination between
            # processes through the CPU.
            with suppress_stdout():
                cpu_group = torch.distributed.new_group(ranks, backend="gloo")
            if self.rank in ranks:
                self.ranks = ranks
                self.world_size = len(ranks)
                self.rank_in_group = ranks.index(self.rank)
                self_device_group = device_group
                self_cpu_group = cpu_group

        assert self_cpu_group is not None
        assert self_device_group is not None

        self.cpu_group = self_cpu_group
        self.device_group = self_device_group

        from vllm.platforms import current_platform

        if current_platform.is_cuda_alike():
            self.device = torch.device(f"cuda:{local_rank}")
        elif current_platform.is_xpu():
            self.device = torch.device(f"xpu:{local_rank}")
        elif current_platform.is_out_of_tree():
            self.device = torch.device(f"{current_platform.device_name}:{local_rank}")
        else:
            self.device = torch.device("cpu")

        self.use_device_communicator = use_device_communicator
        self.device_communicator = None
        if use_device_communicator and self.world_size > 1:
            device_comm_cls = resolve_obj_by_qualname(
                current_platform.get_device_communicator_cls()
            )
            self.device_communicator = device_comm_cls(
                cpu_group=self.cpu_group,
                device=self.device,
                device_group=self.device_group,
                unique_name=self.unique_name,
            )

        from vllm.distributed.device_communicators.shm_broadcast import MessageQueue

        self.mq_broadcaster: MessageQueue | None = None
        if use_message_queue_broadcaster and self.world_size > 1:
            self.mq_broadcaster = MessageQueue.create_from_process_group(
                self.cpu_group, 1 << 22, 6
            )

        from vllm.platforms import current_platform

        self.use_custom_op_call = (
            current_platform.is_cuda_alike() or current_platform.is_tpu()
        )

        self.use_cpu_custom_send_recv = current_platform.is_cpu() and hasattr(
            torch.ops._C, "init_shm_manager"
        )

    def create_mq_broadcaster(
        self, writer_rank=0, external_writer_handle=None, blocking=True
    ):
        from vllm.distributed.device_communicators.shm_broadcast import MessageQueue

        return MessageQueue.create_from_process_group(
            self.cpu_group,
            1 << 22,
            6,
            writer_rank=writer_rank,
            external_writer_handle=external_writer_handle,
            blocking=blocking,
        )

    def create_single_reader_mq_broadcasters(
        self, reader_rank_in_group=0, blocking=False
    ):
        from vllm.distributed.device_communicators.shm_broadcast import MessageQueue

        return MessageQueue.create_from_process_group_single_reader(
            self.cpu_group,
            1 << 22,
            6,
            reader_rank=self.ranks[reader_rank_in_group],
            blocking=blocking,
        )

    @property
    def first_rank(self):
        """Return the global rank of the first process in the group"""
        return self.ranks[0]

    @property
    def last_rank(self):
        """Return the global rank of the last process in the group"""
        return self.ranks[-1]

    @property
    def is_first_rank(self):
        """Return whether the caller is the first process in the group"""
        return self.rank == self.first_rank

    @property
    def is_last_rank(self):
        """Return whether the caller is the last process in the group"""
        return self.rank == self.last_rank

    @property
    def next_rank(self):
        """Return the global rank of the process that follows the caller"""
        rank_in_group = self.rank_in_group
        world_size = self.world_size
        return self.ranks[(rank_in_group + 1) % world_size]

    @property
    def prev_rank(self):
        """Return the global rank of the process that precedes the caller"""
        rank_in_group = self.rank_in_group
        world_size = self.world_size
        return self.ranks[(rank_in_group - 1) % world_size]

    @contextmanager
    def graph_capture(self, graph_capture_context: GraphCaptureContext | None = None):
        if graph_capture_context is None:
            stream = torch.cuda.Stream()
            graph_capture_context = GraphCaptureContext(stream)
        else:
            stream = graph_capture_context.stream

        # only cuda uses this function,
        # so we don't abstract it into the base class
        maybe_ca_context = nullcontext()
        from vllm.distributed.device_communicators.cuda_communicator import (
            CudaCommunicator,
        )

        if self.device_communicator is not None:
            assert isinstance(self.device_communicator, CudaCommunicator)
            ca_comm = self.device_communicator.ca_comm
            if ca_comm is not None:
                maybe_ca_context = ca_comm.capture()  # type: ignore

        # ensure all initialization operations complete before attempting to
        # capture the graph on another stream
        curr_stream = torch.cuda.current_stream()
        if curr_stream != stream:
            stream.wait_stream(curr_stream)

        with torch.cuda.stream(stream), maybe_ca_context:
            yield graph_capture_context

    def all_reduce(self, input_: torch.Tensor) -> torch.Tensor:
        """
        User-facing all-reduce function before we actually call the
        all-reduce operation.

        We need this because Dynamo does not support passing an arbitrary
        object (`self` in this case) to a custom op. We need to pass the
        group name as a string, and then look up the group coordinator from
        the group name, dispatch the all-reduce operation to the group
        coordinator.

        In addition, PyTorch custom ops do not support mutation or returning
        a new tensor in the same op. So we always make the all-reduce operation
        out-of-place.
        """
        # Bypass the function if we are using only 1 GPU.
        if self.world_size == 1:
            return input_

        if self.use_custom_op_call:
            return torch.ops.vllm.all_reduce(input_, group_name=self.unique_name)
        else:
            return self._all_reduce_out_place(input_)

    def _all_reduce_out_place(self, input_: torch.Tensor) -> torch.Tensor:
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.all_reduce(input_)

    def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor:
        world_size = self.world_size
        # Bypass the function if we are using only 1 GPU.
        if world_size == 1:
            return input_
        assert -input_.dim() <= dim < input_.dim(), (
            f"Invalid dim ({dim}) for input tensor with shape {input_.size()}"
        )

        if self.use_custom_op_call:
            return torch.ops.vllm.all_gather(
                input_, dim, world_size, group_name=self.unique_name
            )
        else:
            return self._all_gather_out_place(input_, dim)

    def _all_gather_out_place(self, input_: torch.Tensor, dim: int) -> torch.Tensor:
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.all_gather(input_, dim)

    def all_gatherv(
        self,
        input_: torch.Tensor | list[torch.Tensor],
        dim: int = 0,
        sizes: list[int] | None = None,
    ):
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.all_gatherv(input_, dim, sizes)

    def reduce_scatter(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor:
        world_size = self.world_size
        # Bypass the function if we are using only 1 GPU.
        if world_size == 1:
            return input_
        assert -input_.dim() <= dim < input_.dim(), (
            f"Invalid dim ({dim}) for input tensor with shape {input_.size()}"
        )

        if self.use_custom_op_call:
            return torch.ops.vllm.reduce_scatter(
                input_, dim, world_size, group_name=self.unique_name
            )
        else:
            return self._reduce_scatter_out_place(input_, dim)

    def reduce_scatterv(
        self, input_: torch.Tensor, dim: int = -1, sizes: list[int] | None = None
    ) -> torch.Tensor:
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.reduce_scatterv(input_, dim, sizes)

    def _reduce_scatter_out_place(self, input_: torch.Tensor, dim: int) -> torch.Tensor:
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.reduce_scatter(input_, dim)

    def gather(
        self, input_: torch.Tensor, dst: int = 0, dim: int = -1
    ) -> torch.Tensor | None:
        """
        NOTE: We assume that the input tensor is on the same device across
        all the ranks.
        NOTE: `dst` is the local rank of the destination rank.
        """
        world_size = self.world_size
        # Bypass the function if we are using only 1 GPU.
        if world_size == 1:
            return input_
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.gather(input_, dst, dim)

    def broadcast(self, input_: torch.Tensor, src: int = 0):
        """Broadcast the input tensor.
        NOTE: `src` is the local rank of the source rank.
        """
        assert src < self.world_size, f"Invalid src rank ({src})"

        # Bypass the function if we are using only 1 GPU.
        if self.world_size == 1:
            return input_
        # Broadcast.
        torch.distributed.broadcast(
            input_, src=self.ranks[src], group=self.device_group
        )
        return input_

    def broadcast_object(self, obj: Any | None = None, src: int = 0):
        """Broadcast the input object.
        NOTE: `src` is the local rank of the source rank.
        """
        assert src < self.world_size, f"Invalid src rank ({src})"

        # Bypass the function if we are using only 1 GPU.
        if self.world_size == 1:
            return obj
        if self.mq_broadcaster is not None:
            assert src == 0, "Message queue broadcaster only supports src=0"
            return self.mq_broadcaster.broadcast_object(obj)
        if self.rank_in_group == src:
            torch.distributed.broadcast_object_list(
                [obj], src=self.ranks[src], group=self.cpu_group
            )
            return obj
        else:
            recv = [None]
            torch.distributed.broadcast_object_list(
                recv, src=self.ranks[src], group=self.cpu_group
            )
            return recv[0]

    def broadcast_object_list(
        self, obj_list: list[Any], src: int = 0, group: ProcessGroup | None = None
    ):
        """Broadcast the input object list.
        NOTE: `src` is the local rank of the source rank.
        """
        assert src < self.world_size, f"Invalid src rank ({src})"

        # Bypass the function if we are using only 1 GPU.
        if self.world_size == 1:
            return obj_list
        # Broadcast.
        torch.distributed.broadcast_object_list(
            obj_list, src=self.ranks[src], group=self.device_group
        )
        return obj_list

    def send_object(self, obj: Any, dst: int) -> None:
        """Send the input object list to the destination rank."""
        """NOTE: `dst` is the local rank of the destination rank."""

        assert dst < self.world_size, f"Invalid dst rank ({dst})"

        assert dst != self.rank_in_group, (
            "Invalid destination rank. Destination rank is the same "
            "as the current rank."
        )

        # Serialize object to tensor and get the size as well
        object_tensor = torch.frombuffer(pickle.dumps(obj), dtype=torch.uint8)

        size_tensor = torch.tensor(
            [object_tensor.numel()], dtype=torch.long, device="cpu"
        )

        # Send object size

        torch.distributed.send(size_tensor, dst=self.ranks[dst], group=self.cpu_group)

        # Send object
        torch.distributed.send(object_tensor, dst=self.ranks[dst], group=self.cpu_group)

        return None

    def recv_object(self, src: int) -> Any:
        """Receive the input object list from the source rank."""
        """NOTE: `src` is the local rank of the source rank."""

        assert src < self.world_size, f"Invalid src rank ({src})"

        assert src != self.rank_in_group, (
            "Invalid source rank. Source rank is the same as the current rank."
        )

        size_tensor = torch.empty(1, dtype=torch.long, device="cpu")

        # Receive object size
        rank_size = torch.distributed.recv(
            size_tensor, src=self.ranks[src], group=self.cpu_group
        )

        # Tensor to receive serialized objects into.
        object_tensor = torch.empty(  # type: ignore[call-overload]
            size_tensor.item(),  # type: ignore[arg-type]
            dtype=torch.uint8,
            device="cpu",
        )

        rank_object = torch.distributed.recv(
            object_tensor, src=self.ranks[src], group=self.cpu_group
        )

        assert rank_object == rank_size, (
            "Received object sender rank does not match the size sender rank."
        )

        obj = pickle.loads(object_tensor.numpy().tobytes())

        return obj

    def broadcast_tensor_dict(
        self,
        tensor_dict: dict[str, torch.Tensor | Any] | None = None,
        src: int = 0,
        group: ProcessGroup | None = None,
        metadata_group: ProcessGroup | None = None,
    ) -> dict[str, torch.Tensor | Any] | None:
        """Broadcast the input tensor dictionary.
        NOTE: `src` is the local rank of the source rank.
        """
        # Bypass the function if we are using only 1 GPU.
        if not torch.distributed.is_initialized() or self.world_size == 1:
            return tensor_dict

        group = self.device_group
        metadata_group = self.cpu_group
        assert src < self.world_size, f"Invalid src rank ({src})"

        rank_in_group = self.rank_in_group
        if rank_in_group == src:
            metadata_list: list[tuple[Any, Any]] = []
            assert isinstance(tensor_dict, dict), (
                f"Expecting a dictionary, got {type(tensor_dict)}"
            )
            metadata_list, tensor_list = _split_tensor_dict(tensor_dict)
            # `metadata_list` lives in CPU memory.
            # `broadcast_object_list` has serialization & deserialization,
            # all happening on CPU. Therefore, we can use the CPU group.
            self.broadcast_object(metadata_list, src=src)
            async_handles = []
            for tensor in tensor_list:
                if tensor.numel() == 0:
                    # Skip broadcasting empty tensors.
                    continue
                if tensor.is_cpu:
                    # use metadata_group for CPU tensors
                    handle = torch.distributed.broadcast(
                        tensor, src=self.ranks[src], group=metadata_group, async_op=True
                    )
                else:
                    # use group for GPU tensors
                    handle = torch.distributed.broadcast(
                        tensor, src=self.ranks[src], group=group, async_op=True
                    )
                async_handles.append(handle)
            for async_handle in async_handles:
                async_handle.wait()

        else:
            metadata_list = self.broadcast_object(None, src=src)
            tensor_dict = {}
            async_handles = []
            for key, value in metadata_list:
                if isinstance(value, TensorMetadata):
                    tensor = torch.empty(
                        value.size, dtype=value.dtype, device=value.device
                    )
                    if tensor.numel() == 0:
                        # Skip broadcasting empty tensors.
                        tensor_dict[key] = tensor
                        continue
                    if tensor.is_cpu:
                        # use metadata_group for CPU tensors
                        handle = torch.distributed.broadcast(
                            tensor,
                            src=self.ranks[src],
                            group=metadata_group,
                            async_op=True,
                        )
                    else:
                        # use group for GPU tensors
                        handle = torch.distributed.broadcast(
                            tensor, src=self.ranks[src], group=group, async_op=True
                        )
                    async_handles.append(handle)
                    tensor_dict[key] = tensor
                else:
                    tensor_dict[key] = value
            for async_handle in async_handles:
                async_handle.wait()
        return tensor_dict

    def send_tensor_dict(
        self,
        tensor_dict: dict[str, torch.Tensor | Any],
        dst: int | None = None,
        all_gather_group: Optional["GroupCoordinator"] = None,
        all_gather_tensors: dict[str, bool] | None = None,
    ) -> dict[str, torch.Tensor | Any] | None:
        """Send the input tensor dictionary.
        NOTE: `dst` is the local rank of the source rank.

        all_gather_group: The group for the all-gather operation. If provided,
            an optimization is enabled where each rank in the group sends a
            slice of a tensor and the receiver reconstructs it using an
            all-gather, which can improve performance. This is typically the
            tensor-parallel group.
        all_gather_tensors: A dictionary to specify which tensors should use
            the all-gather optimization, which is only effective when
            `all_gather_group` is provided. By default, this optimization is
            on for any tensor whose size is divisible by the
            `all_gather_group`'s world size. However, it should be disabled
            for tensors that are not fully replicated across the group (e.g.,
            the residual tensor when sequence parallelism is enabled). This
            dictionary allows overriding the default behavior on a per-tensor
            basis.
        """
        # Bypass the function if we are using only 1 GPU.
        if not torch.distributed.is_initialized() or self.world_size == 1:
            return tensor_dict
        all_gather_size = 1 if all_gather_group is None else all_gather_group.world_size
        all_gather_rank = (
            0 if all_gather_group is None else all_gather_group.rank_in_group
        )

        group = self.device_group
        metadata_group = self.cpu_group

        if dst is None:
            dst = (self.rank_in_group + 1) % self.world_size
        assert dst < self.world_size, f"Invalid dst rank ({dst})"

        if self.use_cpu_custom_send_recv:
            if self.device_communicator is None:
                raise ValueError("No device communicator found")
            self.device_communicator.send_tensor_dict(  # type: ignore
                tensor_dict, dst
            )
            return None

        metadata_list: list[tuple[Any, Any]] = []
        assert isinstance(tensor_dict, dict), (
            f"Expecting a dictionary, got {type(tensor_dict)}"
        )
        metadata_list, tensor_list = _split_tensor_dict(tensor_dict)
        # `metadata_list` lives in CPU memory.
        # `send_object_list` has serialization & deserialization,
        # all happening on CPU. Therefore, we can use the CPU group.
        self.send_object(metadata_list, dst=dst)

        tensor_keys = [k for k, v in tensor_dict.items() if isinstance(v, torch.Tensor)]
        assert len(tensor_keys) == len(tensor_list)

        for key, tensor in zip(tensor_keys, tensor_list):
            if tensor.numel() == 0:
                # Skip sending empty tensors.
                continue

            # send-allgather: send only a slice, then do allgather.
            use_all_gather = (
                all_gather_group is not None and tensor.numel() % all_gather_size == 0
            )
            use_all_gather = (
                all_gather_tensors.get(key, use_all_gather)
                if all_gather_tensors
                else use_all_gather
            )
            if use_all_gather:
                tensor = tensor.reshape(all_gather_size, -1)[all_gather_rank]

            if tensor.is_cpu:
                # use metadata_group for CPU tensors
                torch.distributed.send(
                    tensor, dst=self.ranks[dst], group=metadata_group
                )
            else:
                # use group for GPU tensors
                torch.distributed.send(tensor, dst=self.ranks[dst], group=group)
        return None

    def recv_tensor_dict(
        self,
        src: int | None = None,
        all_gather_group: Optional["GroupCoordinator"] = None,
        all_gather_tensors: dict[str, bool] | None = None,
    ) -> dict[str, torch.Tensor | Any] | None:
        """Recv the input tensor dictionary.
        NOTE: `src` is the local rank of the source rank.

        all_gather_group: The group for the all-gather operation. If provided,
            an optimization is enabled where each rank in the group sends a
            slice of a tensor and the receiver reconstructs it using an
            all-gather, which can improve performance. This is typically the
            tensor-parallel group.
        all_gather_tensors: A dictionary to specify which tensors should use
            the all-gather optimization, which is only effective when
            `all_gather_group` is provided. By default, this optimization is
            on for any tensor whose size is divisible by the
            `all_gather_group`'s world size. However, it should be disabled
            for tensors that are not fully replicated across the group (e.g.,
            the residual tensor when sequence parallelism is enabled). This
            dictionary allows overriding the default behavior on a per-tensor
            basis.
        """
        # Bypass the function if we are using only 1 GPU.
        if not torch.distributed.is_initialized() or self.world_size == 1:
            return None
        all_gather_size = 1 if all_gather_group is None else all_gather_group.world_size
        all_gather_rank = (
            0 if all_gather_group is None else all_gather_group.rank_in_group
        )

        group = self.device_group
        metadata_group = self.cpu_group

        if src is None:
            src = (self.rank_in_group - 1) % self.world_size
        assert src < self.world_size, f"Invalid src rank ({src})"

        if self.use_cpu_custom_send_recv:
            if self.device_communicator is None:
                raise ValueError("No device communicator found")
            return self.device_communicator.recv_tensor_dict(  # type: ignore
                src
            )

        recv_metadata_list = self.recv_object(src=src)
        tensor_dict: dict[str, Any] = {}
        for key, value in recv_metadata_list:
            if isinstance(value, TensorMetadata):
                tensor = torch.empty(value.size, dtype=value.dtype, device=value.device)
                if tensor.numel() == 0:
                    # Skip broadcasting empty tensors.
                    tensor_dict[key] = tensor
                    continue

                # send-allgather: send only a slice, then do allgather.
                use_all_gather = (
                    all_gather_group is not None
                    and tensor.numel() % all_gather_size == 0
                )
                use_all_gather = (
                    all_gather_tensors.get(key, use_all_gather)
                    if all_gather_tensors
                    else use_all_gather
                )

                if use_all_gather:
                    orig_shape = tensor.shape
                    tensor = tensor.reshape(all_gather_size, -1)[all_gather_rank]

                if tensor.is_cpu:
                    # use metadata_group for CPU tensors
                    torch.distributed.recv(
                        tensor, src=self.ranks[src], group=metadata_group
                    )
                else:
                    # use group for GPU tensors
                    torch.distributed.recv(tensor, src=self.ranks[src], group=group)
                if use_all_gather:
                    # do the allgather
                    tensor = all_gather_group.all_gather(  # type: ignore
                        tensor, dim=0
                    )
                    tensor = tensor.reshape(orig_shape)

                tensor_dict[key] = tensor
            else:
                tensor_dict[key] = value
        return tensor_dict

    def barrier(self):
        """Barrier synchronization among the group.
        NOTE: don't use `device_group` here! `barrier` in NCCL is
        terrible because it is internally a broadcast operation with
        secretly created GPU tensors. It is easy to mess up the current
        device. Use the CPU group instead.
        """
        torch.distributed.barrier(group=self.cpu_group)

    def send(self, tensor: torch.Tensor, dst: int | None = None) -> None:
        """Sends a tensor to the destination rank in a blocking way"""
        """NOTE: `dst` is the local rank of the destination rank."""
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        self.device_communicator.send(tensor, dst)

    def recv(
        self, size: torch.Size, dtype: torch.dtype, src: int | None = None
    ) -> torch.Tensor:
        """Receives a tensor from the source rank."""
        """NOTE: `src` is the local rank of the source rank."""
        if self.device_communicator is None:
            raise ValueError("No device communicator found")
        return self.device_communicator.recv(size, dtype, src)

    def destroy(self):
        if hasattr(self, "device_group"):
            torch.distributed.destroy_process_group(self.device_group)
            del self.device_group
        if hasattr(self, "cpu_group"):
            torch.distributed.destroy_process_group(self.cpu_group)
            del self.cpu_group
        if self.device_communicator is not None:
            self.device_communicator.destroy()
        if self.mq_broadcaster is not None:
            self.mq_broadcaster = None

    def prepare_communication_buffer_for_model(self, model: torch.nn.Module):
        if self.device_communicator is not None:
            self.device_communicator.prepare_communication_buffer_for_model(model)

    def dispatch(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        is_sequence_parallel: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if self.device_communicator is not None:
            return self.device_communicator.dispatch(
                hidden_states, router_logits, is_sequence_parallel
            )
        else:
            return hidden_states, router_logits

    def combine(
        self, hidden_states, is_sequence_parallel: bool = False
    ) -> torch.Tensor:
        if self.device_communicator is not None:
            return self.device_communicator.combine(hidden_states, is_sequence_parallel)
        else:
            return hidden_states


_WORLD: GroupCoordinator | None = None
_INNER_DP_WORLD: GroupCoordinator | None = None
_NODE_COUNT: int | None = None

def get_world_group() -> GroupCoordinator:
    assert _WORLD is not None, "world group is not initialized"
    return _WORLD


def get_inner_dp_world_group() -> GroupCoordinator:
    assert _INNER_DP_WORLD is not None, "inner dp world group is not initialized"
    return _INNER_DP_WORLD


def init_world_group(
    ranks: list[int], local_rank: int, backend: str
) -> GroupCoordinator:
    return GroupCoordinator(
        group_ranks=[ranks],
        local_rank=local_rank,
        torch_distributed_backend=backend,
        use_device_communicator=False,
        group_name="world",
    )


def init_model_parallel_group(
    group_ranks: list[list[int]],
    local_rank: int,
    backend: str,
    use_message_queue_broadcaster: bool = False,
    group_name: str | None = None,
    use_device_communicator: bool = True,
) -> GroupCoordinator:
    return GroupCoordinator(
        group_ranks=group_ranks,
        local_rank=local_rank,
        torch_distributed_backend=backend,
        use_device_communicator=use_device_communicator,
        use_message_queue_broadcaster=use_message_queue_broadcaster,
        group_name=group_name,
    )


_TP: GroupCoordinator | None = None


def get_tp_group() -> GroupCoordinator:
    assert _TP is not None, "tensor model parallel group is not initialized"
    return _TP


_DCP: GroupCoordinator | None = None


def get_dcp_group() -> GroupCoordinator:
    assert _DCP is not None, "decode context model parallel group is not initialized"
    return _DCP


# kept for backward compatibility
get_context_model_parallel_group = get_dcp_group

_PP: GroupCoordinator | None = None


def get_pp_group() -> GroupCoordinator:
    assert _PP is not None, "pipeline model parallel group is not initialized"
    return _PP


_DP: GroupCoordinator | None = None


def get_dp_group() -> GroupCoordinator:
    assert _DP is not None, "data parallel group is not initialized"
    return _DP


_EP: GroupCoordinator | None = None


def get_ep_group() -> GroupCoordinator:
    assert _EP is not None, "expert parallel group is not initialized"
    return _EP


_PCP: GroupCoordinator | None = None


def get_pcp_group() -> GroupCoordinator:
    assert _PCP is not None, "prefill context parallel group is not initialized"
    return _PCP

@contextmanager
def graph_capture(device: torch.device):
    """
    `graph_capture` is a context manager which should surround the code that
    is capturing the CUDA graph. Its main purpose is to ensure that some
    operations will be run after the graph is captured, before the graph
    is replayed. It returns a `GraphCaptureContext` object which contains the
    necessary data for the graph capture. Currently, it only contains the
    stream that the graph capture is running on. This stream is set to the
    current CUDA stream when the context manager is entered and reset to the
    default stream when the context manager is exited. This is to ensure that
    the graph capture is running on a separate stream from the default stream,
    in order to explicitly distinguish the kernels to capture
    from other kernels possibly launched on background in the default stream.
    """
    context = GraphCaptureContext(torch.cuda.Stream(device=device))
    with get_tp_group().graph_capture(context), get_pp_group().graph_capture(context):
        yield context


logger = init_logger(__name__)

_ENABLE_CUSTOM_ALL_REDUCE = True


def set_custom_all_reduce(enable: bool):
    global _ENABLE_CUSTOM_ALL_REDUCE
    _ENABLE_CUSTOM_ALL_REDUCE = enable

def init_distributed_environment(
    world_size: int = -1,
    rank: int = -1,
    distributed_init_method: str = "env://",
    local_rank: int = -1,
    backend: str = "nccl",
    timeout: timedelta | None = None,
):
    logger.debug(
        "world_size=%d rank=%d local_rank=%d distributed_init_method=%s backend=%s",
        world_size,
        rank,
        local_rank,
        distributed_init_method,
        backend,
    )
    from vllm.config import get_current_vllm_config

    config = get_current_vllm_config()
    if config is not None and config.parallel_config.nnodes > 1:
        parallel_config = config.parallel_config
        ip = parallel_config.master_addr
        rank = parallel_config.data_parallel_rank * world_size + rank
        world_size = parallel_config.world_size_across_dp
        port = parallel_config.master_port
        distributed_init_method = get_distributed_init_method(ip, port)
    elif (
        config is not None
        and config.parallel_config.data_parallel_size > 1
        and config.parallel_config.distributed_executor_backend != "external_launcher"
    ):
        parallel_config = config.parallel_config
        # adjust to take into account data parallelism
        # offset the rank by the data parallel rank
        rank = parallel_config.data_parallel_rank * world_size + rank
        # adjust the world size to take into account data parallelism
        world_size = parallel_config.world_size_across_dp
        ip = parallel_config.data_parallel_master_ip
        port = parallel_config.get_next_dp_init_port()
        distributed_init_method = get_distributed_init_method(ip, port)
        logger.debug(
            "Adjusting world_size=%d rank=%d distributed_init_method=%s for DP",
            world_size,
            rank,
            distributed_init_method,
        )
    if not torch.distributed.is_initialized():
        logger.info(
            "world_size=%d rank=%d local_rank=%d distributed_init_method=%s backend=%s",
            world_size,
            rank,
            local_rank,
            distributed_init_method,
            backend,
        )
        assert distributed_init_method is not None, (
            "distributed_init_method must be provided when initializing "
            "distributed environment"
        )
        if not torch.distributed.is_backend_available(backend):
            logger.warning(
                "Distributed backend %s is not available; falling back to gloo.",
                backend,
            )
            assert torch.distributed.is_gloo_available(), (
                "Fallback Gloo backend is not available."
            )
            backend = "gloo"
        # this backend is used for WORLD
        torch.distributed.init_process_group(
            backend=backend,
            init_method=distributed_init_method,
            world_size=world_size,
            rank=rank,
            timeout=timeout,
        )
    # set the local rank
    # local_rank is not available in torch ProcessGroup,
    # see https://github.com/pytorch/pytorch/issues/122816
    if local_rank == -1:
        # local rank not set, this usually happens in single-node
        # setting, where we can use rank as local rank
        local_rank = envs.LOCAL_RANK if distributed_init_method == "env://" else rank
    global _WORLD, _NODE_COUNT, _INNER_DP_WORLD
    if _WORLD is None:
        ranks = list(range(torch.distributed.get_world_size()))
        _WORLD = init_world_group(ranks, local_rank, backend)
        if config.parallel_config.nnodes > 1:
            _NODE_COUNT = config.parallel_config.nnodes
        else:
            _NODE_COUNT = _node_count(_WORLD.cpu_group)
        logger.debug("Detected %d nodes in the distributed environment", _NODE_COUNT)
    else:
        assert _WORLD.world_size == torch.distributed.get_world_size(), (
            "world group already initialized with a different world size"
        )
    if config.parallel_config.nnodes_within_dp > 1:
        if parallel_config.data_parallel_size > 1:
            world_size_inner_dp = parallel_config.world_size
            group_ranks = [
                [dp_rank * world_size_inner_dp + i for i in range(world_size_inner_dp)]
                for dp_rank in range(parallel_config.data_parallel_size)
            ]
            _INNER_DP_WORLD = init_model_parallel_group(
                group_ranks,
                get_world_group().local_rank,
                backend,
                use_message_queue_broadcaster=True,
                group_name="inner_dp_world",
                use_device_communicator=False,
            )
        else:
            _INNER_DP_WORLD = _WORLD

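# Usage sketch (illustrative only; assumes a torchrun-style launcher that
# sets WORLD_SIZE / RANK / LOCAL_RANK in the environment):
#
#     import os
#     init_distributed_environment(
#         world_size=int(os.environ["WORLD_SIZE"]),
#         rank=int(os.environ["RANK"]),
#         local_rank=int(os.environ["LOCAL_RANK"]),
#         backend="gloo",  # e.g. for CPU-only hosts
#     )
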
def initialize_model_parallel(
    tensor_model_parallel_size: int = 1,
    pipeline_model_parallel_size: int = 1,
    prefill_context_model_parallel_size: int = 1,
    decode_context_model_parallel_size: int | None = 1,
    backend: str | None = None,
) -> None:
    """
    Initialize model parallel groups.

    Arguments:
        tensor_model_parallel_size: number of GPUs used for tensor model
            parallelism.
        pipeline_model_parallel_size: number of GPUs used for pipeline model
            parallelism.
        backend: name of torch distributed communication backend.

    Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
        4 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7]
        2 pipeline model-parallel groups:
            [g0, g2, g4, g6], [g1, g3, g5, g7]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    world_size: int = torch.distributed.get_world_size()
    rank = torch.distributed.get_rank()
    backend = backend or torch.distributed.get_backend(get_world_group().device_group)

    data_parallel_size = 1
    from vllm.config import get_current_vllm_config

    config = get_current_vllm_config()
    if config is not None:
        data_parallel_size = config.parallel_config.data_parallel_size

    # the layout order is: ExternalDP x DP x PP x PCP x TP
    # ExternalDP is the data parallel group that is not part of the model,
    # every dp rank can generate independently (in verl integration).
    # DP is the data parallel group that is part of the model,
    # all the ranks in the same DP group should generate simultaneously,
    # i.e. the `generate` call in the same DP group should be called together,
    # otherwise it will cause deadlock.
    # to get group_ranks for each dimension, transpose that dimension to the
    # last dimension, then reshape to 2D, then unbind the last dimension
    all_ranks = torch.arange(world_size).reshape(
        -1,
        data_parallel_size,
        pipeline_model_parallel_size,
        prefill_context_model_parallel_size,
        tensor_model_parallel_size,
    )  # noqa

    # Build the tensor model-parallel groups.
    global _TP
    assert _TP is None, "tensor model parallel group is already initialized"
    group_ranks = all_ranks.view(-1, tensor_model_parallel_size).unbind(0)
    group_ranks = [x.tolist() for x in group_ranks]

    # message queue broadcaster is only used in tensor model parallel group
    _TP = init_model_parallel_group(
        group_ranks,
        get_world_group().local_rank,
        backend,
        use_message_queue_broadcaster=True,
        group_name="tp",
    )

    # Build the DCP model-parallel groups.
    global _DCP
    assert _DCP is None, "decode context model parallel group is already initialized"
    # Note(hc): In the current implementation of decode context parallel,
    # dcp_size must not exceed tp_size, because the world size is not changed
    # by DCP; it simply reuses the GPUs of the TP group and splits one TP
    # group into tp_size // dcp_size DCP groups.
    group_ranks = all_ranks.reshape(-1, decode_context_model_parallel_size).unbind(0)
    group_ranks = [x.tolist() for x in group_ranks]
    _DCP = init_model_parallel_group(
        group_ranks,
        get_world_group().local_rank,
        backend,
        use_message_queue_broadcaster=True,
        group_name="dcp",
    )

    global _PCP
    assert _PCP is None, "prefill context parallel group is already initialized"
    group_ranks = (
        all_ranks.transpose(3, 4)
        .reshape(-1, prefill_context_model_parallel_size)
        .unbind(0)
    )
    group_ranks = [x.tolist() for x in group_ranks]
    _PCP = init_model_parallel_group(
        group_ranks, get_world_group().local_rank, backend, group_name="pcp"
    )

    # Build the pipeline model-parallel groups.
    global _PP
    assert _PP is None, "pipeline model parallel group is already initialized"
    group_ranks = (
        all_ranks.transpose(2, 4).reshape(-1, pipeline_model_parallel_size).unbind(0)
    )
    group_ranks = [x.tolist() for x in group_ranks]
    _PP = init_model_parallel_group(
        group_ranks, get_world_group().local_rank, backend, group_name="pp"
    )

    global _DP
    assert _DP is None, "data parallel group is already initialized"
    group_ranks = all_ranks.transpose(1, 4).reshape(-1, data_parallel_size).unbind(0)
    group_ranks = [x.tolist() for x in group_ranks]
    _DP = init_model_parallel_group(
        group_ranks, get_world_group().local_rank, backend, group_name="dp"
    )

    global _EP
    assert _EP is None, "expert parallel group is already initialized"
    group_ranks = (
        all_ranks.transpose(1, 2)
        .reshape(
            -1,
            data_parallel_size
            * prefill_context_model_parallel_size
            * tensor_model_parallel_size,
        )
        .unbind(0)
    )
    group_ranks = [x.tolist() for x in group_ranks]
    _EP = init_model_parallel_group(
        group_ranks, get_world_group().local_rank, backend, group_name="ep"
    )

    logger.info_once(
        "rank %s in world size %s is assigned as "
        "DP rank %s, PP rank %s, PCP rank %s, "
        "TP rank %s, EP rank %s",
        rank,
        world_size,
        _DP.rank_in_group,
        _PP.rank_in_group,
        _PCP.rank_in_group,
        _TP.rank_in_group,
        _EP.rank_in_group,
    )

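# Sketch of the rank-grouping arithmetic above, using the docstring's example
# of 8 ranks with TP=2 and PP=4 (illustrative only; DP and PCP are 1):
#
#     tp, pp, pcp, dp = 2, 4, 1, 1
#     all_ranks = torch.arange(8).reshape(-1, dp, pp, pcp, tp)
#     tp_groups = [x.tolist() for x in all_ranks.view(-1, tp).unbind(0)]
#     # -> [[0, 1], [2, 3], [4, 5], [6, 7]]
#     pp_groups = [
#         x.tolist()
#         for x in all_ranks.transpose(2, 4).reshape(-1, pp).unbind(0)
#     ]
#     # -> [[0, 2, 4, 6], [1, 3, 5, 7]]
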
def ensure_model_parallel_initialized(
    tensor_model_parallel_size: int,
    pipeline_model_parallel_size: int,
    prefill_context_model_parallel_size: int = 1,
    decode_context_model_parallel_size: int | None = 1,
    backend: str | None = None,
) -> None:
    """Helper to initialize model parallel groups if they are not initialized,
    or to check that the tensor-parallel and pipeline-parallel sizes match the
    expected values if the groups are already initialized.
    """
    backend = backend or torch.distributed.get_backend(get_world_group().device_group)
    if not model_parallel_is_initialized():
        initialize_model_parallel(
            tensor_model_parallel_size,
            pipeline_model_parallel_size,
            prefill_context_model_parallel_size,
            decode_context_model_parallel_size,
            backend,
        )
        return

    assert get_tensor_model_parallel_world_size() == tensor_model_parallel_size, (
        "tensor parallel group already initialized, but of unexpected size. "
        f"got: {get_tensor_model_parallel_world_size()=} vs. "
        f"wanted: {tensor_model_parallel_size=}"
    )
    pp_world_size = get_pp_group().world_size
    assert pp_world_size == pipeline_model_parallel_size, (
        "pipeline parallel group already initialized, but of unexpected size. "
        f"got: {pp_world_size=} vs. "
        f"wanted: {pipeline_model_parallel_size=}"
    )
    pcp_world_size = get_pcp_group().world_size
    assert pcp_world_size == prefill_context_model_parallel_size, (
        "prefill context parallel group already initialized, but of unexpected size: "
        f"{pcp_world_size=} vs. "
        f"{prefill_context_model_parallel_size=}"
    )

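# Usage sketch (illustrative only; assumes init_distributed_environment() has
# already been called on this rank). The helper is idempotent, so workers can
# call it unconditionally:
#
#     ensure_model_parallel_initialized(
#         tensor_model_parallel_size=2,
#         pipeline_model_parallel_size=1,
#     )
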
def prepare_communication_buffer_for_model(model: torch.nn.Module):
    """Prepare the communication buffer for the model.
    Traditional communication libraries like NCCL are almost
    model agnostic. However, emerging new communication libraries like
    MoE all2all (DeepEP) usually allocate the communication buffer
    based on the model shape for optimal performance.
    """
    if _TP is not None:
        _TP.prepare_communication_buffer_for_model(model)
    if _PCP is not None:
        _PCP.prepare_communication_buffer_for_model(model)
    if _PP is not None:
        _PP.prepare_communication_buffer_for_model(model)
    if _DP is not None:
        _DP.prepare_communication_buffer_for_model(model)
    if _EP is not None:
        _EP.prepare_communication_buffer_for_model(model)

def model_parallel_is_initialized():
    """Check if tensor and pipeline parallel groups are initialized."""
    return _TP is not None and _PP is not None


_TP_STATE_PATCHED = False

@contextmanager
def patch_tensor_parallel_group(tp_group: GroupCoordinator):
    """Patch the tp group temporarily until this function ends.

    This method is for draft workers of speculative decoding to run the draft
    model with a different TP degree from that of the target model workers.

    Args:
        tp_group (GroupCoordinator): the tp group coordinator
    """
    global _TP_STATE_PATCHED
    assert not _TP_STATE_PATCHED, "Should not call when it's already patched"

    _TP_STATE_PATCHED = True
    old_tp_group = get_tp_group()
    global _TP
    _TP = tp_group
    try:
        yield
    finally:
        # restore the original state
        _TP_STATE_PATCHED = False
        _TP = old_tp_group

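# Usage sketch (illustrative only; `draft_tp_group` stands for a
# GroupCoordinator built separately for the draft worker):
#
#     with patch_tensor_parallel_group(draft_tp_group):
#         ...  # get_tp_group() now returns draft_tp_group
#     # on exit the original TP group is restored, even if an error was raised
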
def get_tensor_model_parallel_world_size():
    """Return world size for the tensor model parallel group."""
    return get_tp_group().world_size


def get_tensor_model_parallel_rank():
    """Return my rank for the tensor model parallel group."""
    return get_tp_group().rank_in_group


def get_decode_context_model_parallel_world_size():
    """Return world size for the decode context model parallel group."""
    return get_dcp_group().world_size


def get_decode_context_model_parallel_rank():
    """Return my rank for the decode context model parallel group."""
    return get_dcp_group().rank_in_group


def get_node_count() -> int:
    """Return the total number of nodes in the distributed environment."""
    assert _NODE_COUNT is not None, "distributed environment is not initialized"
    return _NODE_COUNT

def destroy_model_parallel():
    """Set the groups to none and destroy them."""
    global _TP

    if _TP:
        _TP.destroy()
    _TP = None

    global _DCP
    if _DCP:
        _DCP.destroy()
    _DCP = None

    global _PCP
    if _PCP:
        _PCP.destroy()
    _PCP = None

    global _PP
    if _PP:
        _PP.destroy()
    _PP = None

    global _DP
    if _DP:
        _DP.destroy()
    _DP = None

    global _EP
    if _EP:
        _EP.destroy()
    _EP = None

def destroy_distributed_environment():
    global _WORLD, _NODE_COUNT
    if _WORLD:
        _WORLD.destroy()
    _WORLD = None
    _NODE_COUNT = None
    if torch.distributed.is_initialized():
        torch.distributed.destroy_process_group()

def cleanup_dist_env_and_memory(shutdown_ray: bool = False):
    # Ensure all objects are not frozen before cleanup
    gc.unfreeze()

    destroy_model_parallel()
    destroy_distributed_environment()
    if shutdown_ray:
        import ray  # Lazy import Ray

        ray.shutdown()
    gc.collect()
    from vllm.platforms import current_platform

    empty_cache = current_platform.empty_cache
    if empty_cache is not None:
        empty_cache()
    try:
        if not current_platform.is_cpu():
            torch._C._host_emptyCache()
    except AttributeError:
        logger.warning("torch._C._host_emptyCache() is only available in PyTorch >= 2.5")

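# Usage sketch (illustrative only): offline scripts and tests typically drop
# their engine references first, then call this helper to tear down process
# groups and release cached memory.
#
#     del llm  # hypothetical last reference to an LLM engine
#     cleanup_dist_env_and_memory(shutdown_ray=True)
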
def in_the_same_node_as(
    pg: ProcessGroup | StatelessProcessGroup, source_rank: int = 0
) -> list[bool]:
    """
    This is a collective operation that returns whether each rank is in the
    same node as the source rank. It tests if processes are attached to the
    same memory system (shared access to shared memory).
    """
    if isinstance(pg, ProcessGroup):
        assert torch.distributed.get_backend(pg) != torch.distributed.Backend.NCCL, (
            "in_the_same_node_as should be tested with a non-NCCL group."
        )
        # local rank inside the group
        rank = torch.distributed.get_rank(group=pg)
        world_size = torch.distributed.get_world_size(group=pg)

        # global ranks of the processes in the group
        ranks = torch.distributed.get_process_group_ranks(pg)
    else:
        rank = pg.rank
        world_size = pg.world_size
        ranks = list(range(world_size))

    # local tensor in each process to store the result
    is_in_the_same_node = torch.tensor(
        [0] * world_size, dtype=torch.int32, device="cpu"
    )

    magic_message = b"magic_message"
    shm = None

    try:
        with contextlib.suppress(OSError):
            if rank == source_rank:
                # create a shared memory segment
                shm = shared_memory.SharedMemory(create=True, size=128)
                shm.buf[: len(magic_message)] = magic_message
                if isinstance(pg, ProcessGroup):
                    torch.distributed.broadcast_object_list(
                        [shm.name], src=ranks[source_rank], group=pg
                    )
                else:
                    pg.broadcast_obj(shm.name, src=source_rank)
                is_in_the_same_node[rank] = 1
            else:
                # try to open the shared memory segment
                if isinstance(pg, ProcessGroup):
                    recv = [None]
                    torch.distributed.broadcast_object_list(
                        recv, src=ranks[source_rank], group=pg
                    )
                    name = recv[0]
                else:
                    name = pg.broadcast_obj(None, src=source_rank)
                # fix to https://stackoverflow.com/q/62748654/9191338
                # Python incorrectly tracks shared memory even if it is not
                # created by the process. The following patch is a workaround.
                with patch(
                    "multiprocessing.resource_tracker.register",
                    lambda *args, **kwargs: None,
                ):
                    shm = shared_memory.SharedMemory(name=name)
                if shm.buf[: len(magic_message)] == magic_message:
                    is_in_the_same_node[rank] = 1
    except Exception as e:
        logger.error("Error ignored in is_in_the_same_node: %s", e)
    finally:
        if shm:
            shm.close()

    if isinstance(pg, ProcessGroup):
        torch.distributed.barrier(group=pg)
    else:
        pg.barrier()

    # clean up the shared memory segment
    with contextlib.suppress(OSError):
        if rank == source_rank and shm:
            shm.unlink()

    if isinstance(pg, ProcessGroup):
        torch.distributed.all_reduce(is_in_the_same_node, group=pg)
        aggregated_data = is_in_the_same_node
    else:
        aggregated_data = torch.zeros_like(is_in_the_same_node)
        for i in range(world_size):
            rank_data = pg.broadcast_obj(is_in_the_same_node, src=i)
            aggregated_data += rank_data

    return [x == 1 for x in aggregated_data.tolist()]

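# Usage sketch (illustrative only; assumes `cpu_group` is a Gloo-backed
# process group). The per-rank flags can be reduced to a single decision:
#
#     flags = in_the_same_node_as(cpu_group, source_rank=0)
#     all_ranks_share_source_node = all(flags)
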
def is_global_first_rank() -> bool:
    """
    Check if the current process is the first rank globally across all
    parallelism strategies (PP, TP, DP, EP, etc.).

    Unlike group-specific checks like `get_tensor_model_parallel_rank() == 0`
    or `get_pp_group().is_first_rank`, this function checks the global rank
    across all parallelism dimensions.

    Returns:
        bool: True if this is the global first rank (rank 0), False otherwise.
        Returns True if distributed is not initialized (single process).
    """
    try:
        # If world group is available, use it for the most accurate check
        global _WORLD
        if _WORLD is not None:
            return _WORLD.is_first_rank

        # If torch distributed is not initialized, assume single process
        if not torch.distributed.is_initialized():
            return True

        # Fallback to torch's global rank
        return torch.distributed.get_rank() == 0

    except Exception:
        # If anything goes wrong, assume this is the first rank
        return True

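# Usage sketch (illustrative only): restrict side effects such as progress
# logging or file writes to a single process.
#
#     if is_global_first_rank():
#         logger.info("only the global rank-0 process emits this line")
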
def is_local_first_rank() -> bool:
    """
    Check if the current process is the first local rank (rank 0 on its node).
    """
    try:
        # prefer the initialized world group if available
        global _WORLD
        if _WORLD is not None:
            return _WORLD.local_rank == 0

        if not torch.distributed.is_initialized():
            return True

        # fallback to environment-provided local rank if available
        # note: envs.LOCAL_RANK is set when using env:// launchers (e.g., torchrun)
        try:
            return int(envs.LOCAL_RANK) == 0  # type: ignore[arg-type]
        except Exception:
            return torch.distributed.get_rank() == 0
    except Exception:
        return True

def _node_count(pg: ProcessGroup | StatelessProcessGroup) -> int:
    """
    Returns the total number of nodes in the process group.

    Args:
        pg: The process group to analyze

    Returns:
        int: The total number of nodes
    """
    if isinstance(pg, ProcessGroup):
        world_size = torch.distributed.get_world_size(group=pg)
    else:
        world_size = pg.world_size

    if world_size == 1:
        return 1

    # Build node assignment map
    node_assignment = [0] * world_size  # rank -> node_id
    next_node_id = 0

    for current_rank in range(world_size):
        if node_assignment[current_rank] != 0:
            continue  # Already assigned to a node

        # Assign current rank to a new node
        next_node_id += 1
        node_assignment[current_rank] = next_node_id

        # Find all ranks on the same node as current_rank
        same_node_flags = in_the_same_node_as(pg, current_rank)
        for other_rank, is_same_node in enumerate(same_node_flags):
            if is_same_node and node_assignment[other_rank] == 0:
                node_assignment[other_rank] = next_node_id

    return next_node_id

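# Worked example (illustrative only): with 4 ranks where ranks {0, 1} share
# one node and ranks {2, 3} share another, the loop above assigns node 1 to
# ranks 0 and 1 on the first iteration, skips rank 1, assigns node 2 to ranks
# 2 and 3 when it reaches rank 2, skips rank 3, and returns 2.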