vllm_cpu_avx512vnni-0.13.0-cp313-cp313-manylinux_2_28_x86_64.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vllm-cpu-avx512vnni might be problematic.
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +225 -0
- vllm/_aiter_ops.py +1260 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +3080 -0
- vllm/_ipex_ops.py +457 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +59 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +0 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +443 -0
- vllm/attention/backends/registry.py +254 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +969 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +120 -0
- vllm/attention/layers/cross_attention.py +178 -0
- vllm/attention/layers/encoder_only_attention.py +103 -0
- vllm/attention/layers/mm_encoder_attention.py +284 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
- vllm/attention/ops/common.py +469 -0
- vllm/attention/ops/flashmla.py +251 -0
- vllm/attention/ops/merge_attn_states.py +47 -0
- vllm/attention/ops/paged_attn.py +51 -0
- vllm/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/attention/ops/prefix_prefill.py +814 -0
- vllm/attention/ops/rocm_aiter_mla_sparse.py +210 -0
- vllm/attention/ops/triton_decode_attention.py +712 -0
- vllm/attention/ops/triton_merge_attn_states.py +116 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
- vllm/attention/ops/triton_unified_attention.py +1047 -0
- vllm/attention/ops/vit_attn_wrappers.py +139 -0
- vllm/attention/selector.py +145 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +118 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3228 -0
- vllm/benchmarks/latency.py +170 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/serve.py +1538 -0
- vllm/benchmarks/startup.py +326 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +41 -0
- vllm/benchmarks/sweep/param_sweep.py +158 -0
- vllm/benchmarks/sweep/plot.py +675 -0
- vllm/benchmarks/sweep/plot_pareto.py +393 -0
- vllm/benchmarks/sweep/serve.py +450 -0
- vllm/benchmarks/sweep/serve_sla.py +492 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +132 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +808 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +209 -0
- vllm/compilation/backends.py +839 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +180 -0
- vllm/compilation/collective_fusion.py +1215 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +48 -0
- vllm/compilation/cuda_graph.py +302 -0
- vllm/compilation/decorators.py +626 -0
- vllm/compilation/fix_functionalization.py +266 -0
- vllm/compilation/fusion.py +550 -0
- vllm/compilation/fusion_attn.py +359 -0
- vllm/compilation/fx_utils.py +91 -0
- vllm/compilation/inductor_pass.py +138 -0
- vllm/compilation/matcher_utils.py +361 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +130 -0
- vllm/compilation/partition_rules.py +72 -0
- vllm/compilation/pass_manager.py +155 -0
- vllm/compilation/piecewise_backend.py +178 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +238 -0
- vllm/compilation/rocm_aiter_fusion.py +242 -0
- vllm/compilation/sequence_parallelism.py +364 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +173 -0
- vllm/compilation/wrapper.py +319 -0
- vllm/config/__init__.py +108 -0
- vllm/config/attention.py +114 -0
- vllm/config/cache.py +232 -0
- vllm/config/compilation.py +1140 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +119 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +96 -0
- vllm/config/model.py +2190 -0
- vllm/config/multimodal.py +247 -0
- vllm/config/observability.py +140 -0
- vllm/config/parallel.py +660 -0
- vllm/config/pooler.py +126 -0
- vllm/config/profiler.py +199 -0
- vllm/config/scheduler.py +299 -0
- vllm/config/speculative.py +644 -0
- vllm/config/speech_to_text.py +38 -0
- vllm/config/structured_outputs.py +78 -0
- vllm/config/utils.py +370 -0
- vllm/config/vllm.py +1434 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +327 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +490 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +297 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +340 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +778 -0
- vllm/distributed/device_communicators/shm_object_storage.py +697 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/tpu_communicator.py +99 -0
- vllm/distributed/device_communicators/xpu_communicator.py +95 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +3 -0
- vllm/distributed/eplb/async_worker.py +115 -0
- vllm/distributed/eplb/eplb_state.py +1164 -0
- vllm/distributed/eplb/policy/__init__.py +19 -0
- vllm/distributed/eplb/policy/abstract.py +40 -0
- vllm/distributed/eplb/policy/default.py +267 -0
- vllm/distributed/eplb/rebalance_execute.py +529 -0
- vllm/distributed/kv_events.py +499 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +197 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +322 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +597 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +327 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +378 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1418 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +895 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
- vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +914 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +464 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2526 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +538 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1795 -0
- vllm/distributed/tpu_distributed_utils.py +188 -0
- vllm/distributed/utils.py +545 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2068 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +190 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +468 -0
- vllm/entrypoints/api_server.py +185 -0
- vllm/entrypoints/chat_utils.py +1903 -0
- vllm/entrypoints/cli/__init__.py +15 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +56 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/startup.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +260 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +249 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +12 -0
- vllm/entrypoints/context.py +835 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1790 -0
- vllm/entrypoints/logger.py +84 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1469 -0
- vllm/entrypoints/openai/cli_args.py +302 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/parser/__init__.py +0 -0
- vllm/entrypoints/openai/parser/harmony_utils.py +825 -0
- vllm/entrypoints/openai/parser/responses_parser.py +135 -0
- vllm/entrypoints/openai/protocol.py +2496 -0
- vllm/entrypoints/openai/run_batch.py +631 -0
- vllm/entrypoints/openai/serving_chat.py +1822 -0
- vllm/entrypoints/openai/serving_completion.py +729 -0
- vllm/entrypoints/openai/serving_engine.py +1542 -0
- vllm/entrypoints/openai/serving_models.py +304 -0
- vllm/entrypoints/openai/serving_responses.py +2080 -0
- vllm/entrypoints/openai/serving_transcription.py +168 -0
- vllm/entrypoints/openai/speech_to_text.py +559 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +33 -0
- vllm/entrypoints/openai/utils.py +49 -0
- vllm/entrypoints/pooling/__init__.py +16 -0
- vllm/entrypoints/pooling/classify/__init__.py +0 -0
- vllm/entrypoints/pooling/classify/api_router.py +50 -0
- vllm/entrypoints/pooling/classify/protocol.py +181 -0
- vllm/entrypoints/pooling/classify/serving.py +233 -0
- vllm/entrypoints/pooling/embed/__init__.py +0 -0
- vllm/entrypoints/pooling/embed/api_router.py +67 -0
- vllm/entrypoints/pooling/embed/protocol.py +208 -0
- vllm/entrypoints/pooling/embed/serving.py +684 -0
- vllm/entrypoints/pooling/pooling/__init__.py +0 -0
- vllm/entrypoints/pooling/pooling/api_router.py +63 -0
- vllm/entrypoints/pooling/pooling/protocol.py +148 -0
- vllm/entrypoints/pooling/pooling/serving.py +354 -0
- vllm/entrypoints/pooling/score/__init__.py +0 -0
- vllm/entrypoints/pooling/score/api_router.py +149 -0
- vllm/entrypoints/pooling/score/protocol.py +146 -0
- vllm/entrypoints/pooling/score/serving.py +508 -0
- vllm/entrypoints/renderer.py +410 -0
- vllm/entrypoints/responses_utils.py +249 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +118 -0
- vllm/entrypoints/score_utils.py +237 -0
- vllm/entrypoints/serve/__init__.py +60 -0
- vllm/entrypoints/serve/disagg/__init__.py +0 -0
- vllm/entrypoints/serve/disagg/api_router.py +110 -0
- vllm/entrypoints/serve/disagg/protocol.py +90 -0
- vllm/entrypoints/serve/disagg/serving.py +285 -0
- vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
- vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
- vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
- vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
- vllm/entrypoints/serve/instrumentator/health.py +33 -0
- vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
- vllm/entrypoints/serve/lora/__init__.py +0 -0
- vllm/entrypoints/serve/lora/api_router.py +70 -0
- vllm/entrypoints/serve/profile/__init__.py +0 -0
- vllm/entrypoints/serve/profile/api_router.py +46 -0
- vllm/entrypoints/serve/rlhf/__init__.py +0 -0
- vllm/entrypoints/serve/rlhf/api_router.py +102 -0
- vllm/entrypoints/serve/sleep/__init__.py +0 -0
- vllm/entrypoints/serve/sleep/api_router.py +60 -0
- vllm/entrypoints/serve/tokenize/__init__.py +0 -0
- vllm/entrypoints/serve/tokenize/api_router.py +118 -0
- vllm/entrypoints/serve/tokenize/serving.py +204 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +187 -0
- vllm/entrypoints/tool_server.py +234 -0
- vllm/entrypoints/utils.py +319 -0
- vllm/env_override.py +378 -0
- vllm/envs.py +1744 -0
- vllm/forward_context.py +358 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +146 -0
- vllm/inputs/preprocess.py +717 -0
- vllm/logger.py +303 -0
- vllm/logging_utils/__init__.py +13 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +127 -0
- vllm/logging_utils/lazy.py +20 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +206 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +42 -0
- vllm/lora/layers/base.py +66 -0
- vllm/lora/layers/base_linear.py +165 -0
- vllm/lora/layers/column_parallel_linear.py +577 -0
- vllm/lora/layers/fused_moe.py +747 -0
- vllm/lora/layers/logits_processor.py +203 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +176 -0
- vllm/lora/layers/utils.py +74 -0
- vllm/lora/layers/vocal_parallel_embedding.py +140 -0
- vllm/lora/lora_model.py +246 -0
- vllm/lora/lora_weights.py +227 -0
- vllm/lora/model_manager.py +690 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +665 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +295 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +141 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +493 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +412 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_tpu.py +358 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +100 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +315 -0
- vllm/lora/worker_manager.py +268 -0
- vllm/model_executor/__init__.py +11 -0
- vllm/model_executor/custom_op.py +199 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +595 -0
- vllm/model_executor/layers/attention_layer_base.py +32 -0
- vllm/model_executor/layers/batch_invariant.py +1067 -0
- vllm/model_executor/layers/conv.py +256 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +114 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +171 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +409 -0
- vllm/model_executor/layers/fused_moe/config.py +1043 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +292 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1453 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +358 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +434 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +376 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +825 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2223 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +103 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +119 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +524 -0
- vllm/model_executor/layers/fused_moe/layer.py +2133 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1302 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +78 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +455 -0
- vllm/model_executor/layers/fused_moe/utils.py +332 -0
- vllm/model_executor/layers/kda.py +442 -0
- vllm/model_executor/layers/layernorm.py +442 -0
- vllm/model_executor/layers/lightning_attn.py +735 -0
- vllm/model_executor/layers/linear.py +1424 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +68 -0
- vllm/model_executor/layers/mamba/linear_attn.py +388 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +526 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +930 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +255 -0
- vllm/model_executor/layers/mla.py +176 -0
- vllm/model_executor/layers/pooler.py +830 -0
- vllm/model_executor/layers/quantization/__init__.py +179 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +793 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +626 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +986 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2645 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/cpu_wna16.py +625 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +207 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1461 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +677 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +932 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +202 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +487 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +109 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +81 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +71 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +106 -0
- vllm/model_executor/layers/quantization/kv_cache.py +153 -0
- vllm/model_executor/layers/quantization/modelopt.py +1684 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +516 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1140 -0
- vllm/model_executor/layers/quantization/petit.py +319 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +136 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +527 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +622 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +621 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +412 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +312 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1453 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +474 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +678 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +452 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +381 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +741 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +289 -0
- vllm/model_executor/layers/rotary_embedding/base.py +254 -0
- vllm/model_executor/layers/rotary_embedding/common.py +279 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
- vllm/model_executor/model_loader/__init__.py +150 -0
- vllm/model_executor/model_loader/base_loader.py +57 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
- vllm/model_executor/model_loader/default_loader.py +321 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +371 -0
- vllm/model_executor/model_loader/online_quantization.py +275 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
- vllm/model_executor/model_loader/tensorizer.py +790 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/tpu.py +118 -0
- vllm/model_executor/model_loader/utils.py +292 -0
- vllm/model_executor/model_loader/weight_utils.py +1157 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +522 -0
- vllm/model_executor/models/afmoe.py +696 -0
- vllm/model_executor/models/aimv2.py +248 -0
- vllm/model_executor/models/apertus.py +565 -0
- vllm/model_executor/models/arcee.py +428 -0
- vllm/model_executor/models/arctic.py +633 -0
- vllm/model_executor/models/aria.py +653 -0
- vllm/model_executor/models/audioflamingo3.py +639 -0
- vllm/model_executor/models/aya_vision.py +448 -0
- vllm/model_executor/models/bagel.py +584 -0
- vllm/model_executor/models/baichuan.py +493 -0
- vllm/model_executor/models/bailing_moe.py +642 -0
- vllm/model_executor/models/bamba.py +511 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +925 -0
- vllm/model_executor/models/bert_with_rope.py +732 -0
- vllm/model_executor/models/blip.py +350 -0
- vllm/model_executor/models/blip2.py +693 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1095 -0
- vllm/model_executor/models/chatglm.py +502 -0
- vllm/model_executor/models/clip.py +1004 -0
- vllm/model_executor/models/cohere2_vision.py +470 -0
- vllm/model_executor/models/commandr.py +469 -0
- vllm/model_executor/models/config.py +531 -0
- vllm/model_executor/models/dbrx.py +484 -0
- vllm/model_executor/models/deepencoder.py +676 -0
- vllm/model_executor/models/deepseek_eagle.py +252 -0
- vllm/model_executor/models/deepseek_mtp.py +446 -0
- vllm/model_executor/models/deepseek_ocr.py +591 -0
- vllm/model_executor/models/deepseek_v2.py +1710 -0
- vllm/model_executor/models/deepseek_vl2.py +642 -0
- vllm/model_executor/models/dots1.py +565 -0
- vllm/model_executor/models/dots_ocr.py +821 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +754 -0
- vllm/model_executor/models/ernie45_vl.py +1621 -0
- vllm/model_executor/models/ernie45_vl_moe.py +800 -0
- vllm/model_executor/models/ernie_mtp.py +279 -0
- vllm/model_executor/models/exaone.py +524 -0
- vllm/model_executor/models/exaone4.py +516 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +543 -0
- vllm/model_executor/models/falcon_h1.py +675 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +371 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +435 -0
- vllm/model_executor/models/gemma3.py +507 -0
- vllm/model_executor/models/gemma3_mm.py +664 -0
- vllm/model_executor/models/gemma3n.py +1166 -0
- vllm/model_executor/models/gemma3n_mm.py +810 -0
- vllm/model_executor/models/glm.py +24 -0
- vllm/model_executor/models/glm4.py +295 -0
- vllm/model_executor/models/glm4_1v.py +1808 -0
- vllm/model_executor/models/glm4_moe.py +736 -0
- vllm/model_executor/models/glm4_moe_mtp.py +359 -0
- vllm/model_executor/models/glm4v.py +783 -0
- vllm/model_executor/models/gpt2.py +397 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +346 -0
- vllm/model_executor/models/gpt_neox.py +340 -0
- vllm/model_executor/models/gpt_oss.py +744 -0
- vllm/model_executor/models/granite.py +475 -0
- vllm/model_executor/models/granite_speech.py +912 -0
- vllm/model_executor/models/granitemoe.py +560 -0
- vllm/model_executor/models/granitemoehybrid.py +703 -0
- vllm/model_executor/models/granitemoeshared.py +328 -0
- vllm/model_executor/models/gritlm.py +243 -0
- vllm/model_executor/models/grok1.py +554 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1040 -0
- vllm/model_executor/models/hunyuan_vision.py +1034 -0
- vllm/model_executor/models/hyperclovax_vision.py +1164 -0
- vllm/model_executor/models/idefics2_vision_model.py +427 -0
- vllm/model_executor/models/idefics3.py +716 -0
- vllm/model_executor/models/interfaces.py +1179 -0
- vllm/model_executor/models/interfaces_base.py +228 -0
- vllm/model_executor/models/intern_vit.py +454 -0
- vllm/model_executor/models/internlm2.py +453 -0
- vllm/model_executor/models/internlm2_ve.py +139 -0
- vllm/model_executor/models/interns1.py +828 -0
- vllm/model_executor/models/interns1_vit.py +433 -0
- vllm/model_executor/models/internvl.py +1450 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jais2.py +529 -0
- vllm/model_executor/models/jamba.py +609 -0
- vllm/model_executor/models/jina_vl.py +147 -0
- vllm/model_executor/models/keye.py +1706 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +658 -0
- vllm/model_executor/models/kimi_vl.py +576 -0
- vllm/model_executor/models/lfm2.py +515 -0
- vllm/model_executor/models/lfm2_moe.py +745 -0
- vllm/model_executor/models/lightonocr.py +195 -0
- vllm/model_executor/models/llama.py +700 -0
- vllm/model_executor/models/llama4.py +856 -0
- vllm/model_executor/models/llama4_eagle.py +225 -0
- vllm/model_executor/models/llama_eagle.py +213 -0
- vllm/model_executor/models/llama_eagle3.py +375 -0
- vllm/model_executor/models/llava.py +840 -0
- vllm/model_executor/models/llava_next.py +581 -0
- vllm/model_executor/models/llava_next_video.py +465 -0
- vllm/model_executor/models/llava_onevision.py +921 -0
- vllm/model_executor/models/longcat_flash.py +743 -0
- vllm/model_executor/models/longcat_flash_mtp.py +349 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +288 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +826 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/minicpm.py +656 -0
- vllm/model_executor/models/minicpm3.py +233 -0
- vllm/model_executor/models/minicpm_eagle.py +385 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1742 -0
- vllm/model_executor/models/minimax_m2.py +550 -0
- vllm/model_executor/models/minimax_text_01.py +1007 -0
- vllm/model_executor/models/minimax_vl_01.py +394 -0
- vllm/model_executor/models/mistral3.py +635 -0
- vllm/model_executor/models/mistral_large_3.py +63 -0
- vllm/model_executor/models/mistral_large_3_eagle.py +136 -0
- vllm/model_executor/models/mixtral.py +598 -0
- vllm/model_executor/models/mllama4.py +1149 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +451 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1550 -0
- vllm/model_executor/models/moonvit.py +686 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1730 -0
- vllm/model_executor/models/nemotron.py +499 -0
- vllm/model_executor/models/nemotron_h.py +900 -0
- vllm/model_executor/models/nemotron_nas.py +471 -0
- vllm/model_executor/models/nemotron_vl.py +651 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +412 -0
- vllm/model_executor/models/olmo2.py +454 -0
- vllm/model_executor/models/olmoe.py +493 -0
- vllm/model_executor/models/opencua.py +262 -0
- vllm/model_executor/models/openpangu.py +1049 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +365 -0
- vllm/model_executor/models/ouro.py +507 -0
- vllm/model_executor/models/ovis.py +557 -0
- vllm/model_executor/models/ovis2_5.py +661 -0
- vllm/model_executor/models/paddleocr_vl.py +1300 -0
- vllm/model_executor/models/paligemma.py +408 -0
- vllm/model_executor/models/persimmon.py +373 -0
- vllm/model_executor/models/phi.py +363 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +729 -0
- vllm/model_executor/models/phi4mm.py +1251 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +669 -0
- vllm/model_executor/models/pixtral.py +1379 -0
- vllm/model_executor/models/plamo2.py +965 -0
- vllm/model_executor/models/plamo3.py +440 -0
- vllm/model_executor/models/qwen.py +365 -0
- vllm/model_executor/models/qwen2.py +600 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1219 -0
- vllm/model_executor/models/qwen2_5_vl.py +1569 -0
- vllm/model_executor/models/qwen2_audio.py +471 -0
- vllm/model_executor/models/qwen2_moe.py +597 -0
- vllm/model_executor/models/qwen2_rm.py +123 -0
- vllm/model_executor/models/qwen2_vl.py +1568 -0
- vllm/model_executor/models/qwen3.py +331 -0
- vllm/model_executor/models/qwen3_moe.py +751 -0
- vllm/model_executor/models/qwen3_next.py +1395 -0
- vllm/model_executor/models/qwen3_next_mtp.py +296 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1793 -0
- vllm/model_executor/models/qwen3_vl.py +2092 -0
- vllm/model_executor/models/qwen3_vl_moe.py +474 -0
- vllm/model_executor/models/qwen_vl.py +801 -0
- vllm/model_executor/models/radio.py +555 -0
- vllm/model_executor/models/registry.py +1189 -0
- vllm/model_executor/models/roberta.py +259 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +492 -0
- vllm/model_executor/models/siglip.py +1244 -0
- vllm/model_executor/models/siglip2navit.py +658 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +484 -0
- vllm/model_executor/models/stablelm.py +354 -0
- vllm/model_executor/models/starcoder2.py +365 -0
- vllm/model_executor/models/step3_text.py +554 -0
- vllm/model_executor/models/step3_vl.py +1147 -0
- vllm/model_executor/models/swin.py +514 -0
- vllm/model_executor/models/tarsier.py +617 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +318 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +518 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +325 -0
- vllm/model_executor/models/transformers/multimodal.py +411 -0
- vllm/model_executor/models/transformers/pooling.py +119 -0
- vllm/model_executor/models/transformers/utils.py +213 -0
- vllm/model_executor/models/ultravox.py +766 -0
- vllm/model_executor/models/utils.py +832 -0
- vllm/model_executor/models/vision.py +546 -0
- vllm/model_executor/models/voxtral.py +841 -0
- vllm/model_executor/models/whisper.py +971 -0
- vllm/model_executor/models/zamba2.py +979 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +119 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
- vllm/model_executor/warmup/kernel_warmup.py +98 -0
- vllm/multimodal/__init__.py +40 -0
- vllm/multimodal/audio.py +147 -0
- vllm/multimodal/base.py +56 -0
- vllm/multimodal/cache.py +823 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +120 -0
- vllm/multimodal/image.py +142 -0
- vllm/multimodal/inputs.py +1089 -0
- vllm/multimodal/parse.py +565 -0
- vllm/multimodal/processing.py +2240 -0
- vllm/multimodal/profiling.py +351 -0
- vllm/multimodal/registry.py +357 -0
- vllm/multimodal/utils.py +513 -0
- vllm/multimodal/video.py +340 -0
- vllm/outputs.py +345 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +421 -0
- vllm/platforms/cuda.py +618 -0
- vllm/platforms/interface.py +695 -0
- vllm/platforms/rocm.py +564 -0
- vllm/platforms/tpu.py +295 -0
- vllm/platforms/xpu.py +277 -0
- vllm/plugins/__init__.py +81 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +230 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/profiler/wrapper.py +241 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +30 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +96 -0
- vllm/reasoning/abs_reasoning_parsers.py +318 -0
- vllm/reasoning/basic_parsers.py +175 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +67 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/holo2_reasoning_parser.py +88 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +63 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
- vllm/reasoning/mistral_reasoning_parser.py +154 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +107 -0
- vllm/sampling_params.py +597 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +98 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tokenizers/__init__.py +20 -0
- vllm/tokenizers/deepseek_v32.py +175 -0
- vllm/tokenizers/deepseek_v32_encoding.py +459 -0
- vllm/tokenizers/detokenizer_utils.py +198 -0
- vllm/tokenizers/hf.py +119 -0
- vllm/tokenizers/mistral.py +567 -0
- vllm/tokenizers/protocol.py +114 -0
- vllm/tokenizers/registry.py +233 -0
- vllm/tool_parsers/__init__.py +150 -0
- vllm/tool_parsers/abstract_tool_parser.py +273 -0
- vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
- vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
- vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
- vllm/tool_parsers/glm4_moe_tool_parser.py +200 -0
- vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/tool_parsers/granite_tool_parser.py +253 -0
- vllm/tool_parsers/hermes_tool_parser.py +495 -0
- vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/tool_parsers/jamba_tool_parser.py +323 -0
- vllm/tool_parsers/kimi_k2_tool_parser.py +590 -0
- vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/tool_parsers/llama_tool_parser.py +324 -0
- vllm/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/tool_parsers/minimax_m2_tool_parser.py +643 -0
- vllm/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/tool_parsers/mistral_tool_parser.py +585 -0
- vllm/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/tool_parsers/openai_tool_parser.py +102 -0
- vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/tool_parsers/step3_tool_parser.py +303 -0
- vllm/tool_parsers/utils.py +229 -0
- vllm/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1144 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +102 -0
- vllm/transformers_utils/configs/afmoe.py +87 -0
- vllm/transformers_utils/configs/arctic.py +216 -0
- vllm/transformers_utils/configs/bagel.py +53 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +90 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +82 -0
- vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +148 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +163 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +235 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +220 -0
- vllm/transformers_utils/configs/nemotron_h.py +284 -0
- vllm/transformers_utils/configs/olmo3.py +83 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +277 -0
- vllm/transformers_utils/configs/radio.py +89 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +178 -0
- vllm/transformers_utils/configs/tarsier2.py +24 -0
- vllm/transformers_utils/configs/ultravox.py +120 -0
- vllm/transformers_utils/dynamic_module.py +59 -0
- vllm/transformers_utils/gguf_utils.py +280 -0
- vllm/transformers_utils/processor.py +424 -0
- vllm/transformers_utils/processors/__init__.py +25 -0
- vllm/transformers_utils/processors/bagel.py +73 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
- vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/repo_utils.py +287 -0
- vllm/transformers_utils/runai_utils.py +102 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +127 -0
- vllm/transformers_utils/tokenizer_base.py +33 -0
- vllm/transformers_utils/utils.py +112 -0
- vllm/triton_utils/__init__.py +20 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +294 -0
- vllm/utils/__init__.py +66 -0
- vllm/utils/argparse_utils.py +492 -0
- vllm/utils/async_utils.py +310 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +112 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +400 -0
- vllm/utils/flashinfer.py +528 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +151 -0
- vllm/utils/hashing.py +117 -0
- vllm/utils/import_utils.py +449 -0
- vllm/utils/jsontree.py +158 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +232 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/nvtx_pytorch_hooks.py +286 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +51 -0
- vllm/utils/serial_utils.py +214 -0
- vllm/utils/system_utils.py +269 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +648 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +497 -0
- vllm/v1/attention/backends/flash_attn.py +1051 -0
- vllm/v1/attention/backends/flashinfer.py +1575 -0
- vllm/v1/attention/backends/flex_attention.py +1028 -0
- vllm/v1/attention/backends/gdn_attn.py +375 -0
- vllm/v1/attention/backends/linear_attn.py +77 -0
- vllm/v1/attention/backends/mamba1_attn.py +159 -0
- vllm/v1/attention/backends/mamba2_attn.py +348 -0
- vllm/v1/attention/backends/mamba_attn.py +117 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/aiter_triton_mla.py +74 -0
- vllm/v1/attention/backends/mla/common.py +2114 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +342 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +174 -0
- vllm/v1/attention/backends/mla/flashmla.py +317 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +1020 -0
- vllm/v1/attention/backends/mla/indexer.py +345 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +275 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +325 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/pallas.py +436 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
- vllm/v1/attention/backends/rocm_attn.py +359 -0
- vllm/v1/attention/backends/short_conv_attn.py +104 -0
- vllm/v1/attention/backends/tree_attn.py +428 -0
- vllm/v1/attention/backends/triton_attn.py +497 -0
- vllm/v1/attention/backends/utils.py +1212 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +485 -0
- vllm/v1/core/encoder_cache_manager.py +402 -0
- vllm/v1/core/kv_cache_coordinator.py +570 -0
- vllm/v1/core/kv_cache_manager.py +419 -0
- vllm/v1/core/kv_cache_metrics.py +96 -0
- vllm/v1/core/kv_cache_utils.py +1476 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +68 -0
- vllm/v1/core/sched/interface.py +189 -0
- vllm/v1/core/sched/output.py +230 -0
- vllm/v1/core/sched/request_queue.py +217 -0
- vllm/v1/core/sched/scheduler.py +1826 -0
- vllm/v1/core/sched/utils.py +64 -0
- vllm/v1/core/single_type_kv_cache_manager.py +801 -0
- vllm/v1/cudagraph_dispatcher.py +183 -0
- vllm/v1/engine/__init__.py +217 -0
- vllm/v1/engine/async_llm.py +866 -0
- vllm/v1/engine/coordinator.py +377 -0
- vllm/v1/engine/core.py +1455 -0
- vllm/v1/engine/core_client.py +1416 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/input_processor.py +643 -0
- vllm/v1/engine/llm_engine.py +414 -0
- vllm/v1/engine/logprobs.py +189 -0
- vllm/v1/engine/output_processor.py +659 -0
- vllm/v1/engine/parallel_sampling.py +145 -0
- vllm/v1/engine/processor.py +20 -0
- vllm/v1/engine/utils.py +1068 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +890 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +626 -0
- vllm/v1/executor/ray_utils.py +465 -0
- vllm/v1/executor/uniproc_executor.py +186 -0
- vllm/v1/kv_cache_interface.py +404 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +86 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +66 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +280 -0
- vllm/v1/kv_offload/worker/worker.py +144 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1305 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +194 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +437 -0
- vllm/v1/outputs.py +245 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +126 -0
- vllm/v1/request.py +282 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +278 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +52 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +384 -0
- vllm/v1/sample/rejection_sampler.py +805 -0
- vllm/v1/sample/sampler.py +319 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +514 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1331 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +225 -0
- vllm/v1/spec_decode/ngram_proposer.py +291 -0
- vllm/v1/spec_decode/suffix_decoding.py +101 -0
- vllm/v1/spec_decode/utils.py +121 -0
- vllm/v1/structured_output/__init__.py +353 -0
- vllm/v1/structured_output/backend_guidance.py +265 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +378 -0
- vllm/v1/structured_output/request.py +94 -0
- vllm/v1/structured_output/utils.py +469 -0
- vllm/v1/utils.py +414 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +343 -0
- vllm/v1/worker/cp_utils.py +42 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +192 -0
- vllm/v1/worker/dp_utils.py +240 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
- vllm/v1/worker/gpu/README.md +4 -0
- vllm/v1/worker/gpu/__init__.py +0 -0
- vllm/v1/worker/gpu/async_utils.py +98 -0
- vllm/v1/worker/gpu/attn_utils.py +189 -0
- vllm/v1/worker/gpu/block_table.py +314 -0
- vllm/v1/worker/gpu/cudagraph_utils.py +259 -0
- vllm/v1/worker/gpu/dp_utils.py +31 -0
- vllm/v1/worker/gpu/input_batch.py +479 -0
- vllm/v1/worker/gpu/metrics/__init__.py +0 -0
- vllm/v1/worker/gpu/metrics/logits.py +42 -0
- vllm/v1/worker/gpu/model_runner.py +1006 -0
- vllm/v1/worker/gpu/sample/__init__.py +0 -0
- vllm/v1/worker/gpu/sample/gumbel.py +101 -0
- vllm/v1/worker/gpu/sample/logprob.py +167 -0
- vllm/v1/worker/gpu/sample/metadata.py +192 -0
- vllm/v1/worker/gpu/sample/min_p.py +51 -0
- vllm/v1/worker/gpu/sample/output.py +14 -0
- vllm/v1/worker/gpu/sample/penalties.py +155 -0
- vllm/v1/worker/gpu/sample/sampler.py +87 -0
- vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
- vllm/v1/worker/gpu/spec_decode/eagle.py +565 -0
- vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
- vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
- vllm/v1/worker/gpu/states.py +316 -0
- vllm/v1/worker/gpu/structured_outputs.py +76 -0
- vllm/v1/worker/gpu_input_batch.py +990 -0
- vllm/v1/worker/gpu_model_runner.py +5470 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +472 -0
- vllm/v1/worker/gpu_worker.py +955 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +302 -0
- vllm/v1/worker/lora_model_runner_mixin.py +212 -0
- vllm/v1/worker/tpu_input_batch.py +583 -0
- vllm/v1/worker/tpu_model_runner.py +2191 -0
- vllm/v1/worker/tpu_worker.py +352 -0
- vllm/v1/worker/ubatch_utils.py +109 -0
- vllm/v1/worker/ubatching.py +231 -0
- vllm/v1/worker/utils.py +375 -0
- vllm/v1/worker/worker_base.py +377 -0
- vllm/v1/worker/workspace.py +253 -0
- vllm/v1/worker/xpu_model_runner.py +48 -0
- vllm/v1/worker/xpu_worker.py +174 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu_avx512vnni-0.13.0.dist-info/METADATA +339 -0
- vllm_cpu_avx512vnni-0.13.0.dist-info/RECORD +1641 -0
- vllm_cpu_avx512vnni-0.13.0.dist-info/WHEEL +5 -0
- vllm_cpu_avx512vnni-0.13.0.dist-info/entry_points.txt +5 -0
- vllm_cpu_avx512vnni-0.13.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2133 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
|
|
4
|
+
from collections.abc import Callable, Iterable
|
|
5
|
+
from contextlib import nullcontext
|
|
6
|
+
from enum import Enum
|
|
7
|
+
from functools import partial
|
|
8
|
+
from typing import Literal, cast, get_args, overload
|
|
9
|
+
|
|
10
|
+
import torch
|
|
11
|
+
import torch.nn.functional as F
|
|
12
|
+
from torch.nn.parameter import UninitializedParameter
|
|
13
|
+
|
|
14
|
+
import vllm.envs as envs
|
|
15
|
+
from vllm._aiter_ops import rocm_aiter_ops
|
|
16
|
+
from vllm.config import VllmConfig, get_current_vllm_config
|
|
17
|
+
from vllm.config.parallel import ExpertPlacementStrategy
|
|
18
|
+
from vllm.distributed import (
|
|
19
|
+
get_dp_group,
|
|
20
|
+
get_ep_group,
|
|
21
|
+
get_pcp_group,
|
|
22
|
+
get_tensor_model_parallel_world_size,
|
|
23
|
+
tensor_model_parallel_all_reduce,
|
|
24
|
+
)
|
|
25
|
+
from vllm.distributed.eplb.eplb_state import EplbState
|
|
26
|
+
from vllm.forward_context import ForwardContext, get_forward_context
|
|
27
|
+
from vllm.logger import init_logger
|
|
28
|
+
from vllm.model_executor.custom_op import CustomOp
|
|
29
|
+
from vllm.model_executor.layers.fused_moe.config import (
|
|
30
|
+
FusedMoEConfig,
|
|
31
|
+
FusedMoEParallelConfig,
|
|
32
|
+
FusedMoEQuantConfig,
|
|
33
|
+
RoutingMethodType,
|
|
34
|
+
)
|
|
35
|
+
from vllm.model_executor.layers.fused_moe.fused_moe import zero_experts_compute_triton
|
|
36
|
+
from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
|
|
37
|
+
init_aiter_topK_meta_data,
|
|
38
|
+
)
|
|
39
|
+
from vllm.model_executor.layers.fused_moe.routing_simulator import RoutingSimulator
|
|
40
|
+
from vllm.model_executor.layers.quantization.base_config import (
|
|
41
|
+
QuantizationConfig,
|
|
42
|
+
)
|
|
43
|
+
from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
|
|
44
|
+
is_flashinfer_supporting_global_sf,
|
|
45
|
+
)
|
|
46
|
+
from vllm.platforms import current_platform
|
|
47
|
+
from vllm.utils.math_utils import cdiv, round_up
|
|
48
|
+
from vllm.utils.torch_utils import (
|
|
49
|
+
aux_stream,
|
|
50
|
+
current_stream,
|
|
51
|
+
direct_register_custom_op,
|
|
52
|
+
)
|
|
53
|
+
from vllm.v1.worker.ubatching import dbo_current_ubatch_id
|
|
54
|
+
|
|
55
|
+
if current_platform.is_cuda_alike():
|
|
56
|
+
from .fused_moe import eplb_map_to_physical_and_record
|
|
57
|
+
else:
|
|
58
|
+
|
|
59
|
+
def _eplb_map_to_physical_and_record(
|
|
60
|
+
topk_ids: torch.Tensor,
|
|
61
|
+
expert_load_view: torch.Tensor,
|
|
62
|
+
logical_to_physical_map: torch.Tensor,
|
|
63
|
+
logical_replica_count: torch.Tensor,
|
|
64
|
+
) -> torch.Tensor:
|
|
65
|
+
# CPU fallback: no EPLB so just return as is
|
|
66
|
+
return topk_ids
|
|
67
|
+
|
|
68
|
+
eplb_map_to_physical_and_record = _eplb_map_to_physical_and_record
|
|
69
|
+
from vllm.model_executor.layers.fused_moe.fused_moe import grouped_topk
|
|
70
|
+
from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( # noqa: E501
|
|
71
|
+
rocm_aiter_grouped_topk,
|
|
72
|
+
)
|
|
73
|
+
|
|
74
|
+
if current_platform.is_tpu():
|
|
75
|
+
from .moe_pallas import fused_moe as fused_moe_pallas
|
|
76
|
+
else:
|
|
77
|
+
fused_moe_pallas = None # type: ignore
|
|
78
|
+
|
|
79
|
+
from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
|
|
80
|
+
FusedMoEMethodBase,
|
|
81
|
+
)
|
|
82
|
+
from vllm.model_executor.layers.fused_moe.fused_moe_modular_method import (
|
|
83
|
+
FusedMoEModularMethod,
|
|
84
|
+
)
|
|
85
|
+
from vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method import (
|
|
86
|
+
UnquantizedFusedMoEMethod,
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
logger = init_logger(__name__)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class FusedMoeWeightScaleSupported(Enum):
|
|
93
|
+
TENSOR = "tensor"
|
|
94
|
+
CHANNEL = "channel"
|
|
95
|
+
GROUP = "group"
|
|
96
|
+
BLOCK = "block"
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def determine_expert_map(
|
|
100
|
+
ep_size: int,
|
|
101
|
+
ep_rank: int,
|
|
102
|
+
global_num_experts: int,
|
|
103
|
+
expert_placement_strategy: ExpertPlacementStrategy = "linear",
|
|
104
|
+
num_fused_shared_experts: int = 0,
|
|
105
|
+
return_expert_mask: bool = False,
|
|
106
|
+
) -> tuple[int, torch.Tensor | None, torch.Tensor | None]:
|
|
107
|
+
"""
|
|
108
|
+
Calculates how many experts should be assigned to each rank for EP and
|
|
109
|
+
creates a mapping from global to local expert index. Experts are
|
|
110
|
+
distributed evenly across ranks. Any remaining are assigned to the
|
|
111
|
+
last rank.
|
|
112
|
+
|
|
113
|
+
Args:
|
|
114
|
+
ep_size: The size of the expert parallel group
|
|
115
|
+
ep_rank: The rank of the current process in the expert parallel
|
|
116
|
+
group
|
|
117
|
+
global_num_experts: The total number of experts in the model.
|
|
118
|
+
expert_placement_strategy: The expert placement strategy.
|
|
119
|
+
|
|
120
|
+
Returns:
|
|
121
|
+
tuple[int, Optional[torch.Tensor]]: A tuple containing:
|
|
122
|
+
- local_num_experts (int): The number of experts assigned
|
|
123
|
+
to the current rank.
|
|
124
|
+
- expert_map (Optional[torch.Tensor]): A tensor of shape
|
|
125
|
+
(global_num_experts,) mapping from global to local index.
|
|
126
|
+
Contains -1 for experts not assigned to the current rank.
|
|
127
|
+
Returns None if ep_size is 1.
|
|
128
|
+
- expert_mask (Optional[torch.Tensor]): A tensor of shape
|
|
129
|
+
(global_num_experts + num_fused_shared_experts + 1,)
|
|
130
|
+
containing 1 for experts assigned to the current rank
|
|
131
|
+
and 0 for sentinel.
|
|
132
|
+
Returns None if ep_size is 1.
|
|
133
|
+
Used only when AITER MOE is enabled.
|
|
134
|
+
"""
|
|
135
|
+
assert ep_size > 0
|
|
136
|
+
if ep_size == 1:
|
|
137
|
+
return (global_num_experts, None, None)
|
|
138
|
+
|
|
139
|
+
# Distribute experts as evenly as possible to each rank.
|
|
140
|
+
base_experts = global_num_experts // ep_size
|
|
141
|
+
remainder = global_num_experts % ep_size
|
|
142
|
+
local_num_experts = base_experts + 1 if ep_rank < remainder else base_experts
|
|
143
|
+
|
|
144
|
+
# Create a tensor of size num_experts filled with -1
|
|
145
|
+
expert_map = torch.full((global_num_experts,), -1, dtype=torch.int32)
|
|
146
|
+
# Create an expert map for the local experts
|
|
147
|
+
if expert_placement_strategy == "linear":
|
|
148
|
+
start_idx = ep_rank * base_experts + min(ep_rank, remainder)
|
|
149
|
+
expert_map[start_idx : start_idx + local_num_experts] = torch.arange(
|
|
150
|
+
0, local_num_experts, dtype=torch.int32
|
|
151
|
+
)
|
|
152
|
+
elif expert_placement_strategy == "round_robin":
|
|
153
|
+
local_log_experts = torch.arange(
|
|
154
|
+
ep_rank, global_num_experts, ep_size, dtype=torch.int32
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
expert_map[local_log_experts] = torch.arange(
|
|
158
|
+
0, local_num_experts, dtype=torch.int32
|
|
159
|
+
)
|
|
160
|
+
else:
|
|
161
|
+
raise ValueError(
|
|
162
|
+
"Unsupported expert placement strategy "
|
|
163
|
+
f"'{expert_placement_strategy}', expected one of "
|
|
164
|
+
f"{get_args(ExpertPlacementStrategy)}"
|
|
165
|
+
)
|
|
166
|
+
|
|
167
|
+
expert_mask = None
|
|
168
|
+
if return_expert_mask:
|
|
169
|
+
expert_mask = torch.ones(
|
|
170
|
+
(global_num_experts + num_fused_shared_experts + 1,), dtype=torch.int32
|
|
171
|
+
)
|
|
172
|
+
expert_mask[-1] = 0
|
|
173
|
+
expert_mask[:global_num_experts] = expert_map > -1
|
|
174
|
+
expert_map = torch.cat(
|
|
175
|
+
(
|
|
176
|
+
expert_map,
|
|
177
|
+
torch.tensor(
|
|
178
|
+
[local_num_experts + i for i in range(num_fused_shared_experts)],
|
|
179
|
+
dtype=torch.int32,
|
|
180
|
+
),
|
|
181
|
+
),
|
|
182
|
+
dim=0,
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
return (local_num_experts, expert_map, expert_mask)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def determine_expert_placement_strategy(
|
|
189
|
+
expert_placement_strategy: ExpertPlacementStrategy,
|
|
190
|
+
moe_parallel_config: FusedMoEParallelConfig,
|
|
191
|
+
num_expert_group: int | None,
|
|
192
|
+
num_redundant_experts: int,
|
|
193
|
+
enable_eplb: bool,
|
|
194
|
+
) -> ExpertPlacementStrategy:
|
|
195
|
+
if expert_placement_strategy == "round_robin":
|
|
196
|
+
round_robin_supported = (
|
|
197
|
+
(num_expert_group is not None and num_expert_group > 1)
|
|
198
|
+
and num_redundant_experts == 0
|
|
199
|
+
and not enable_eplb
|
|
200
|
+
)
|
|
201
|
+
|
|
202
|
+
if not round_robin_supported:
|
|
203
|
+
logger.warning(
|
|
204
|
+
"Round-robin expert placement is only supported for "
|
|
205
|
+
"models with multiple expert groups and no redundant "
|
|
206
|
+
"experts. Falling back to linear expert placement."
|
|
207
|
+
)
|
|
208
|
+
return "linear"
|
|
209
|
+
if (
|
|
210
|
+
moe_parallel_config.use_all2all_kernels
|
|
211
|
+
and not moe_parallel_config.use_deepep_ll_kernels
|
|
212
|
+
):
|
|
213
|
+
logger.warning(
|
|
214
|
+
"Round-robin expert placement currently only supports "
|
|
215
|
+
"the DeepEP low-latency backend, but '%s' was configured. "
|
|
216
|
+
"Falling back to linear expert placement.",
|
|
217
|
+
moe_parallel_config.all2all_backend,
|
|
218
|
+
)
|
|
219
|
+
return "linear"
|
|
220
|
+
|
|
221
|
+
return expert_placement_strategy
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def get_compressed_expert_map(expert_map: torch.Tensor) -> str:
|
|
225
|
+
"""
|
|
226
|
+
Compresses the expert map by removing any -1 entries.
|
|
227
|
+
|
|
228
|
+
Args:
|
|
229
|
+
expert_map (torch.Tensor): A tensor of shape (global_num_experts,)
|
|
230
|
+
mapping from global to local index. Contains -1 for experts not
|
|
231
|
+
assigned to the current rank.
|
|
232
|
+
|
|
233
|
+
Returns:
|
|
234
|
+
str: A string mapping from local to global index.
|
|
235
|
+
Using str to support hashing for logging once only.
|
|
236
|
+
"""
|
|
237
|
+
global_indices = torch.where(expert_map != -1)[0]
|
|
238
|
+
local_indices = expert_map[global_indices]
|
|
239
|
+
return ", ".join(
|
|
240
|
+
f"{local_index.item()}->{global_index.item()}"
|
|
241
|
+
for local_index, global_index in zip(local_indices, global_indices)
|
|
242
|
+
)
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
def maybe_roundup_hidden_size(
|
|
246
|
+
hidden_size: int,
|
|
247
|
+
act_dtype: torch.dtype,
|
|
248
|
+
quant_config: QuantizationConfig | None,
|
|
249
|
+
moe_parallel_config: FusedMoEParallelConfig,
|
|
250
|
+
is_lora_enabled: bool,
|
|
251
|
+
) -> int:
|
|
252
|
+
"""
|
|
253
|
+
Given layer hidden size and MoE configurations, round up hidden_size
|
|
254
|
+
if necessary.
|
|
255
|
+
|
|
256
|
+
Args:
|
|
257
|
+
hidden_size: Layer hidden-size
|
|
258
|
+
act_dtype: Data type of the layer activations.
|
|
259
|
+
quant_config: Fused MoE quantization configuration.
|
|
260
|
+
moe_parallel_config: Fused MoE parallelization strategy configuration.
|
|
261
|
+
is_lora_enabled: True if the engine is enabled with LoRA. This
|
|
262
|
+
is used in the case of mxfp4 quantization in selecting the
|
|
263
|
+
MxFP4Backend.
|
|
264
|
+
|
|
265
|
+
Return:
|
|
266
|
+
Rounded up hidden_size if rounding up is required based on the configs.
|
|
267
|
+
Original hidden size otherwise.
|
|
268
|
+
"""
|
|
269
|
+
from vllm.model_executor.layers.fused_moe.all2all_utils import (
|
|
270
|
+
maybe_roundup_layer_hidden_size,
|
|
271
|
+
)
|
|
272
|
+
|
|
273
|
+
hidden_size = maybe_roundup_layer_hidden_size(
|
|
274
|
+
hidden_size, act_dtype, moe_parallel_config
|
|
275
|
+
)
|
|
276
|
+
|
|
277
|
+
# we are padding globally so EP buffer allocation works
|
|
278
|
+
if quant_config and quant_config.get_name() == "mxfp4":
|
|
279
|
+
from vllm.model_executor.layers.quantization.mxfp4 import (
|
|
280
|
+
Mxfp4Backend,
|
|
281
|
+
get_mxfp4_backend,
|
|
282
|
+
)
|
|
283
|
+
|
|
284
|
+
current_mxfp4_backend = get_mxfp4_backend(is_lora_enabled)
|
|
285
|
+
if (
|
|
286
|
+
current_mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
|
|
287
|
+
or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
|
|
288
|
+
):
|
|
289
|
+
hidden_size = round_up(hidden_size, 128)
|
|
290
|
+
elif (
|
|
291
|
+
current_platform.is_rocm()
|
|
292
|
+
or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
|
|
293
|
+
or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
|
|
294
|
+
):
|
|
295
|
+
hidden_size = round_up(hidden_size, 256)
|
|
296
|
+
|
|
297
|
+
return hidden_size
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
@CustomOp.register("fused_moe")
|
|
301
|
+
class FusedMoE(CustomOp):
|
|
302
|
+
"""FusedMoE layer for MoE models.
|
|
303
|
+
|
|
304
|
+
This layer contains both MergedColumnParallel weights (gate_up_proj /
|
|
305
|
+
w13) and RowParallelLinear weights (down_proj/ w2).
|
|
306
|
+
|
|
307
|
+
Note: Mixtral uses w1, w2, and w3 for gate, up, and down_proj. We
|
|
308
|
+
copy that naming convention here and handle any remapping in the
|
|
309
|
+
load_weights function in each model implementation.
|
|
310
|
+
|
|
311
|
+
Args:
|
|
312
|
+
num_experts: Number of experts in the model
|
|
313
|
+
top_k: Number of experts selected for each token
|
|
314
|
+
hidden_size: Input hidden state size of the transformer
|
|
315
|
+
intermediate_size: Intermediate size of the experts
|
|
316
|
+
params_dtype: Data type for the parameters.
|
|
317
|
+
reduce_results: Whether to all_reduce on the output of the layer
|
|
318
|
+
renormalize: Whether to renormalize the logits in the fused_moe kernel
|
|
319
|
+
quant_config: Quantization configure.
|
|
320
|
+
enable_eplb: Whether to enable expert parallelism load balancer.
|
|
321
|
+
"""
|
|
322
|
+
|
|
323
|
+
def __init__(
|
|
324
|
+
self,
|
|
325
|
+
num_experts: int, # Global number of experts
|
|
326
|
+
top_k: int,
|
|
327
|
+
hidden_size: int,
|
|
328
|
+
intermediate_size: int,
|
|
329
|
+
params_dtype: torch.dtype | None = None,
|
|
330
|
+
reduce_results: bool = False,
|
|
331
|
+
renormalize: bool = True,
|
|
332
|
+
use_grouped_topk: bool = False,
|
|
333
|
+
num_expert_group: int | None = None,
|
|
334
|
+
topk_group: int | None = None,
|
|
335
|
+
quant_config: QuantizationConfig | None = None,
|
|
336
|
+
tp_size: int | None = None,
|
|
337
|
+
ep_size: int | None = None,
|
|
338
|
+
dp_size: int | None = None,
|
|
339
|
+
pcp_size: int | None = None,
|
|
340
|
+
prefix: str = "",
|
|
341
|
+
custom_routing_function: Callable | None = None,
|
|
342
|
+
scoring_func: str = "softmax",
|
|
343
|
+
routed_scaling_factor: float = 1.0,
|
|
344
|
+
e_score_correction_bias: torch.Tensor | None = None,
|
|
345
|
+
apply_router_weight_on_input: bool = False,
|
|
346
|
+
activation: str = "silu",
|
|
347
|
+
is_act_and_mul: bool = True,
|
|
348
|
+
enable_eplb: bool = False,
|
|
349
|
+
num_redundant_experts: int = 0,
|
|
350
|
+
has_bias: bool = False,
|
|
351
|
+
is_sequence_parallel=False,
|
|
352
|
+
zero_expert_num: int | None = 0,
|
|
353
|
+
zero_expert_type: str | None = None,
|
|
354
|
+
expert_mapping: list[tuple[str, str, int, str]] | None = None,
|
|
355
|
+
n_shared_experts: int | None = None,
|
|
356
|
+
routing_method_type: int | None = None,
|
|
357
|
+
):
|
|
358
|
+
super().__init__()
|
|
359
|
+
|
|
360
|
+
# Allow disabling of the separate shared experts stream for
|
|
361
|
+
# debug purposes.
|
|
362
|
+
# TODO: Remove this after more extensive testings with TP/DP
|
|
363
|
+
# and other execution modes
|
|
364
|
+
if envs.VLLM_DISABLE_SHARED_EXPERTS_STREAM:
|
|
365
|
+
logger.info_once("Disabling MoE shared_experts cuda stream")
|
|
366
|
+
self.shared_experts_stream = None
|
|
367
|
+
else:
|
|
368
|
+
# TODO(rob): enable shared expert overlap with non-cuda-alike.
|
|
369
|
+
# aux_stream() returns None on non-cuda-alike platforms.
|
|
370
|
+
self.shared_experts_stream = aux_stream()
|
|
371
|
+
if self.shared_experts_stream is not None:
|
|
372
|
+
logger.info_once(
|
|
373
|
+
"Enabled separate cuda stream for MoE shared_experts", scope="local"
|
|
374
|
+
)
|
|
375
|
+
|
|
376
|
+
if params_dtype is None:
|
|
377
|
+
params_dtype = torch.get_default_dtype()
|
|
378
|
+
self.params_dtype = params_dtype
|
|
379
|
+
|
|
380
|
+
vllm_config = get_current_vllm_config()
|
|
381
|
+
self.vllm_config = vllm_config
|
|
382
|
+
|
|
383
|
+
# FIXME (varun): We should have a better way of inferring the activation
|
|
384
|
+
# datatype. This works for now as the tensor datatype entering the MoE
|
|
385
|
+
# operation is typically unquantized (i.e. float16/bfloat16).
|
|
386
|
+
if vllm_config.model_config is not None:
|
|
387
|
+
moe_in_dtype = vllm_config.model_config.dtype
|
|
388
|
+
else:
|
|
389
|
+
# TODO (bnell): This is a hack to get test_mixtral_moe to work
|
|
390
|
+
# since model_config is not set in the pytest test.
|
|
391
|
+
moe_in_dtype = params_dtype
|
|
392
|
+
|
|
393
|
+
tp_size_ = (
|
|
394
|
+
tp_size if tp_size is not None else get_tensor_model_parallel_world_size()
|
|
395
|
+
)
|
|
396
|
+
dp_size_ = dp_size if dp_size is not None else get_dp_group().world_size
|
|
397
|
+
pcp_size_ = pcp_size if pcp_size is not None else get_pcp_group().world_size
|
|
398
|
+
|
|
399
|
+
self.is_sequence_parallel = is_sequence_parallel
|
|
400
|
+
self.sp_size = tp_size_ if is_sequence_parallel else 1
|
|
401
|
+
|
|
402
|
+
self.moe_parallel_config: FusedMoEParallelConfig = FusedMoEParallelConfig.make(
|
|
403
|
+
tp_size_=tp_size_,
|
|
404
|
+
pcp_size_=pcp_size_,
|
|
405
|
+
dp_size_=dp_size_,
|
|
406
|
+
vllm_parallel_config=vllm_config.parallel_config,
|
|
407
|
+
)
|
|
408
|
+
|
|
409
|
+
self.global_num_experts = num_experts + num_redundant_experts
|
|
410
|
+
self.logical_num_experts = num_experts
|
|
411
|
+
self.zero_expert_num = zero_expert_num
|
|
412
|
+
self.zero_expert_type = zero_expert_type
|
|
413
|
+
|
|
414
|
+
# Expert mapping used in self.load_weights
|
|
415
|
+
self.expert_mapping = expert_mapping
|
|
416
|
+
|
|
417
|
+
# Round up hidden size if needed.
|
|
418
|
+
hidden_size = maybe_roundup_hidden_size(
|
|
419
|
+
hidden_size,
|
|
420
|
+
moe_in_dtype,
|
|
421
|
+
quant_config,
|
|
422
|
+
self.moe_parallel_config,
|
|
423
|
+
is_lora_enabled=self.vllm_config.lora_config is not None,
|
|
424
|
+
)
|
|
425
|
+
|
|
426
|
+
# For smuggling this layer into the fused moe custom op
|
|
427
|
+
compilation_config = vllm_config.compilation_config
|
|
428
|
+
if prefix in compilation_config.static_forward_context:
|
|
429
|
+
raise ValueError("Duplicate layer name: {}".format(prefix))
|
|
430
|
+
compilation_config.static_forward_context[prefix] = self
|
|
431
|
+
self.layer_name = prefix
|
|
432
|
+
|
|
433
|
+
self.enable_eplb = enable_eplb
|
|
434
|
+
self.expert_load_view: torch.Tensor | None = None
|
|
435
|
+
self.logical_to_physical_map: torch.Tensor | None = None
|
|
436
|
+
self.logical_replica_count: torch.Tensor | None = None
|
|
437
|
+
self.expert_placement_strategy: ExpertPlacementStrategy = (
|
|
438
|
+
vllm_config.parallel_config.expert_placement_strategy
|
|
439
|
+
)
|
|
440
|
+
|
|
441
|
+
# ROCm aiter shared experts fusion
|
|
442
|
+
self.rocm_aiter_fmoe_enabled = rocm_aiter_ops.is_fused_moe_enabled()
|
|
443
|
+
self.aiter_fmoe_shared_expert_enabled = (
|
|
444
|
+
rocm_aiter_ops.is_fusion_moe_shared_experts_enabled()
|
|
445
|
+
)
|
|
446
|
+
|
|
447
|
+
self.num_fused_shared_experts = (
|
|
448
|
+
n_shared_experts
|
|
449
|
+
if n_shared_experts is not None and self.aiter_fmoe_shared_expert_enabled
|
|
450
|
+
else 0
|
|
451
|
+
)
|
|
452
|
+
if (
|
|
453
|
+
not self.aiter_fmoe_shared_expert_enabled
|
|
454
|
+
and self.num_fused_shared_experts != 0
|
|
455
|
+
):
|
|
456
|
+
raise ValueError(
|
|
457
|
+
"n_shared_experts is only supported on ROCm aiter when "
|
|
458
|
+
"VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS is enabled"
|
|
459
|
+
)
|
|
460
|
+
|
|
461
|
+
# Determine expert maps
|
|
462
|
+
if self.use_ep:
|
|
463
|
+
if self.enable_eplb:
|
|
464
|
+
assert self.global_num_experts % self.ep_size == 0, (
|
|
465
|
+
"EPLB currently only supports even distribution of "
|
|
466
|
+
"experts across ranks."
|
|
467
|
+
)
|
|
468
|
+
else:
|
|
469
|
+
assert num_redundant_experts == 0, (
|
|
470
|
+
"Redundant experts are only supported with EPLB."
|
|
471
|
+
)
|
|
472
|
+
|
|
473
|
+
self.expert_placement_strategy = determine_expert_placement_strategy(
|
|
474
|
+
expert_placement_strategy=self.expert_placement_strategy,
|
|
475
|
+
moe_parallel_config=self.moe_parallel_config,
|
|
476
|
+
num_expert_group=num_expert_group,
|
|
477
|
+
num_redundant_experts=num_redundant_experts,
|
|
478
|
+
enable_eplb=self.enable_eplb,
|
|
479
|
+
)
|
|
480
|
+
|
|
481
|
+
self._expert_map: torch.Tensor | None
|
|
482
|
+
local_num_experts, expert_map, expert_mask = determine_expert_map(
|
|
483
|
+
ep_size=self.ep_size,
|
|
484
|
+
ep_rank=self.ep_rank,
|
|
485
|
+
global_num_experts=self.global_num_experts,
|
|
486
|
+
expert_placement_strategy=self.expert_placement_strategy,
|
|
487
|
+
num_fused_shared_experts=self.num_fused_shared_experts,
|
|
488
|
+
return_expert_mask=self.rocm_aiter_fmoe_enabled,
|
|
489
|
+
)
|
|
490
|
+
self.local_num_experts = local_num_experts
|
|
491
|
+
self.register_buffer("_expert_map", expert_map)
|
|
492
|
+
self.register_buffer("expert_mask", expert_mask)
|
|
493
|
+
self._maybe_init_expert_routing_tables()
|
|
494
|
+
logger.info_once(
|
|
495
|
+
"[EP Rank %s/%s] Expert parallelism is enabled. Expert "
|
|
496
|
+
"placement strategy: %s. Local/global"
|
|
497
|
+
" number of experts: %s/%s. Experts local to global index map:"
|
|
498
|
+
" %s.",
|
|
499
|
+
self.ep_rank,
|
|
500
|
+
self.ep_size,
|
|
501
|
+
self.expert_placement_strategy,
|
|
502
|
+
self.local_num_experts,
|
|
503
|
+
self.global_num_experts,
|
|
504
|
+
get_compressed_expert_map(self._expert_map),
|
|
505
|
+
)
|
|
506
|
+
else:
|
|
507
|
+
self.local_num_experts, self._expert_map, self.expert_mask = (
|
|
508
|
+
self.global_num_experts,
|
|
509
|
+
None,
|
|
510
|
+
None,
|
|
511
|
+
)
|
|
512
|
+
|
|
513
|
+
self.top_k = top_k
|
|
514
|
+
|
|
515
|
+
self._init_aiter_shared_experts_topK_buffer(
|
|
516
|
+
vllm_config=vllm_config, dp_size=dp_size_
|
|
517
|
+
)
|
|
518
|
+
if self.use_ep and self.rocm_aiter_fmoe_enabled:
|
|
519
|
+
assert self.expert_mask is None or torch.all(
|
|
520
|
+
(expert_mask == 0) | (expert_mask == 1)
|
|
521
|
+
), "Aiter Fused MoE kernel only supports expert_map with 0 and 1s."
|
|
522
|
+
|
|
523
|
+
assert intermediate_size % self.tp_size == 0
|
|
524
|
+
self.hidden_size = hidden_size
|
|
525
|
+
self.intermediate_size_per_partition = intermediate_size // self.tp_size
|
|
526
|
+
self.reduce_results = reduce_results
|
|
527
|
+
self.renormalize = renormalize
|
|
528
|
+
self.use_grouped_topk = use_grouped_topk
|
|
529
|
+
if self.use_grouped_topk:
|
|
530
|
+
assert num_expert_group is not None and topk_group is not None
|
|
531
|
+
self.num_expert_group = num_expert_group
|
|
532
|
+
self.topk_group = topk_group
|
|
533
|
+
self.custom_routing_function = custom_routing_function
|
|
534
|
+
self.scoring_func = scoring_func
|
|
535
|
+
self.routed_scaling_factor = routed_scaling_factor
|
|
536
|
+
self.e_score_correction_bias = e_score_correction_bias
|
|
537
|
+
self.apply_router_weight_on_input = apply_router_weight_on_input
|
|
538
|
+
self.activation = activation
|
|
539
|
+
|
|
540
|
+
if self.scoring_func != "softmax" and not self.use_grouped_topk:
|
|
541
|
+
raise ValueError(
|
|
542
|
+
"Only softmax scoring function is supported for non-grouped topk."
|
|
543
|
+
)
|
|
544
|
+
|
|
545
|
+
# ToDo: Better logic to determine the routing method type
|
|
546
|
+
if routing_method_type is not None:
|
|
547
|
+
self.routing_method_type = routing_method_type
|
|
548
|
+
else:
|
|
549
|
+
if scoring_func == "sigmoid":
|
|
550
|
+
if self.use_grouped_topk:
|
|
551
|
+
self.routing_method_type = RoutingMethodType.DeepSeekV3
|
|
552
|
+
elif self.top_k == 1:
|
|
553
|
+
self.routing_method_type = RoutingMethodType.Llama4
|
|
554
|
+
elif self.scoring_func == "softmax":
|
|
555
|
+
self.routing_method_type = (
|
|
556
|
+
RoutingMethodType.Renormalize
|
|
557
|
+
if not self.renormalize
|
|
558
|
+
else RoutingMethodType.RenormalizeNaive
|
|
559
|
+
)
|
|
560
|
+
else:
|
|
561
|
+
self.routing_method_type = RoutingMethodType.TopK
|
|
562
|
+
|
|
563
|
+
self.moe_config: FusedMoEConfig = FusedMoEConfig(
|
|
564
|
+
num_experts=self.global_num_experts,
|
|
565
|
+
experts_per_token=top_k,
|
|
566
|
+
hidden_dim=hidden_size,
|
|
567
|
+
num_local_experts=self.local_num_experts,
|
|
568
|
+
moe_parallel_config=self.moe_parallel_config,
|
|
569
|
+
in_dtype=moe_in_dtype,
|
|
570
|
+
max_num_tokens=envs.VLLM_MOE_DP_CHUNK_SIZE,
|
|
571
|
+
has_bias=has_bias,
|
|
572
|
+
is_act_and_mul=is_act_and_mul,
|
|
573
|
+
is_lora_enabled=vllm_config.lora_config is not None,
|
|
574
|
+
)
|
|
575
|
+
self.moe_config_use_flashinfer_cutlass_kernels = (
|
|
576
|
+
self.moe_config.use_flashinfer_cutlass_kernels
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
self.quant_config = quant_config
|
|
580
|
+
|
|
581
|
+
def _get_quant_method() -> FusedMoEMethodBase:
|
|
582
|
+
"""
|
|
583
|
+
Helper method to ensure self.quant_method is never None and
|
|
584
|
+
of the proper type.
|
|
585
|
+
"""
|
|
586
|
+
quant_method = None
|
|
587
|
+
if self.quant_config is not None:
|
|
588
|
+
quant_method = self.quant_config.get_quant_method(self, prefix)
|
|
589
|
+
if quant_method is None:
|
|
590
|
+
quant_method = UnquantizedFusedMoEMethod(self.moe_config)
|
|
591
|
+
assert isinstance(quant_method, FusedMoEMethodBase)
|
|
592
|
+
return quant_method
|
|
593
|
+
|
|
594
|
+
# Note: get_quant_method will look at the layer's local_num_experts
|
|
595
|
+
# for heuristic purposes, so it must be initialized first.
|
|
596
|
+
self.quant_method: FusedMoEMethodBase = _get_quant_method()
|
|
597
|
+
|
|
598
|
+
if not self.moe_config.is_act_and_mul:
|
|
599
|
+
# Avoid circular import
|
|
600
|
+
from vllm.model_executor.layers.quantization.modelopt import (
|
|
601
|
+
ModelOptFp8MoEMethod,
|
|
602
|
+
ModelOptNvFp4FusedMoE,
|
|
603
|
+
)
|
|
604
|
+
|
|
605
|
+
if not isinstance(
|
|
606
|
+
self.quant_method,
|
|
607
|
+
(
|
|
608
|
+
UnquantizedFusedMoEMethod,
|
|
609
|
+
ModelOptFp8MoEMethod,
|
|
610
|
+
ModelOptNvFp4FusedMoE,
|
|
611
|
+
),
|
|
612
|
+
):
|
|
613
|
+
raise NotImplementedError(
|
|
614
|
+
"is_act_and_mul=False is supported only for unquantized "
|
|
615
|
+
", ModelOpt FP8, and ModelOpt NvFp4 checkpoints"
|
|
616
|
+
)
|
|
617
|
+
if not current_platform.is_cuda():
|
|
618
|
+
raise NotImplementedError(
|
|
619
|
+
"is_act_and_mul=False is supported only for CUDA for now"
|
|
620
|
+
)
|
|
621
|
+
|
|
622
|
+
if self.enable_eplb and not self.quant_method.supports_eplb:
|
|
623
|
+
# TODO: Add support for additional quantization methods.
|
|
624
|
+
# The implementation for other quantization methods does not
|
|
625
|
+
# contain essential differences, but the current quant API
|
|
626
|
+
# design causes duplicated work when extending to new
|
|
627
|
+
# quantization methods, so I'm leaving it for now.
|
|
628
|
+
# If you plan to add support for more quantization methods,
|
|
629
|
+
# please refer to the implementation in `Fp8MoEMethod`.
|
|
630
|
+
raise NotImplementedError(
|
|
631
|
+
f"EPLB is not supported {self.quant_method.__class__.__name__}. "
|
|
632
|
+
"EPLB is only supported for FP8 quantization for now."
|
|
633
|
+
)
|
|
634
|
+
|
|
635
|
+
moe_quant_params = {
|
|
636
|
+
"num_experts": self.local_num_experts,
|
|
637
|
+
"hidden_size": hidden_size,
|
|
638
|
+
"intermediate_size_per_partition": self.intermediate_size_per_partition,
|
|
639
|
+
"params_dtype": params_dtype,
|
|
640
|
+
"weight_loader": self.weight_loader,
|
|
641
|
+
"global_num_experts": self.global_num_experts,
|
|
642
|
+
}
|
|
643
|
+
# need full intermediate size pre-sharding for WNA16 act order
|
|
644
|
+
if self.quant_method.__class__.__name__ in (
|
|
645
|
+
"GPTQMarlinMoEMethod",
|
|
646
|
+
"CompressedTensorsWNA16MarlinMoEMethod",
|
|
647
|
+
"CompressedTensorsWNA16MoEMethod",
|
|
648
|
+
):
|
|
649
|
+
moe_quant_params["intermediate_size_full"] = intermediate_size
|
|
650
|
+
|
|
651
|
+
self.quant_method.create_weights(layer=self, **moe_quant_params)
|
|
652
|
+
|
|
653
|
+
# Chunked all2all staging tensor
|
|
654
|
+
self.batched_hidden_states: torch.Tensor | None = None
|
|
655
|
+
self.batched_router_logits: torch.Tensor | None = None
|
|
656
|
+
|
|
657
|
+
# Note: maybe_init_modular_kernel should only be called by
|
|
658
|
+
# prepare_communication_buffer_for_model.
|
|
659
|
+
# This is called after all weight loading and post-processing, so it
|
|
660
|
+
# should be safe to swap out the quant_method.
|
|
661
|
+
def maybe_init_modular_kernel(self) -> None:
|
|
662
|
+
self.ensure_moe_quant_config_init()
|
|
663
|
+
# routing_tables only needed for round-robin expert placement with
|
|
664
|
+
# DeepEP all2all backend.
|
|
665
|
+
routing_tables = self._maybe_init_expert_routing_tables()
|
|
666
|
+
prepare_finalize = self.quant_method.maybe_make_prepare_finalize(
|
|
667
|
+
routing_tables=routing_tables
|
|
668
|
+
)
|
|
669
|
+
if prepare_finalize is not None:
|
|
670
|
+
logger.debug(
|
|
671
|
+
"%s for %s(%s)", prepare_finalize.__class__.__name__, self, id(self)
|
|
672
|
+
)
|
|
673
|
+
self.quant_method = FusedMoEModularMethod.make(
|
|
674
|
+
self, self.quant_method, prepare_finalize, self.shared_experts
|
|
675
|
+
)
|
|
676
|
+
|
|
677
|
+
@property
|
|
678
|
+
def shared_experts(self) -> torch.nn.Module | None:
|
|
679
|
+
return None
|
|
680
|
+
|
|
681
|
+
@property
|
|
682
|
+
def gate(self) -> torch.nn.Module | None:
|
|
683
|
+
return None
|
|
684
|
+
|
|
685
|
+
@property
|
|
686
|
+
def tp_size(self):
|
|
687
|
+
return self.moe_parallel_config.tp_size
|
|
688
|
+
|
|
689
|
+
@property
|
|
690
|
+
def dp_size(self):
|
|
691
|
+
return self.moe_parallel_config.dp_size
|
|
692
|
+
|
|
693
|
+
@property
|
|
694
|
+
def pcp_size(self):
|
|
695
|
+
return self.moe_parallel_config.pcp_size
|
|
696
|
+
|
|
697
|
+
@property
|
|
698
|
+
def ep_size(self):
|
|
699
|
+
return self.moe_parallel_config.ep_size
|
|
700
|
+
|
|
701
|
+
@property
|
|
702
|
+
def tp_rank(self):
|
|
703
|
+
return self.moe_parallel_config.tp_rank
|
|
704
|
+
|
|
705
|
+
@property
|
|
706
|
+
def dp_rank(self):
|
|
707
|
+
return self.moe_parallel_config.dp_rank
|
|
708
|
+
|
|
709
|
+
@property
|
|
710
|
+
def pcp_rank(self):
|
|
711
|
+
return self.moe_parallel_config.pcp_rank
|
|
712
|
+
|
|
713
|
+
@property
|
|
714
|
+
def ep_rank(self):
|
|
715
|
+
return self.moe_parallel_config.ep_rank
|
|
716
|
+
|
|
717
|
+
@property
|
|
718
|
+
def use_ep(self):
|
|
719
|
+
return self.moe_parallel_config.use_ep
|
|
720
|
+
|
|
721
|
+
@property
|
|
722
|
+
def use_pplx_kernels(self):
|
|
723
|
+
return self.moe_parallel_config.use_pplx_kernels
|
|
724
|
+
|
|
725
|
+
@property
|
|
726
|
+
def use_deepep_ht_kernels(self):
|
|
727
|
+
return self.moe_parallel_config.use_deepep_ht_kernels
|
|
728
|
+
|
|
729
|
+
@property
|
|
730
|
+
def use_deepep_ll_kernels(self):
|
|
731
|
+
return self.moe_parallel_config.use_deepep_ll_kernels
|
|
732
|
+
|
|
733
|
+
@property
|
|
734
|
+
def use_flashinfer_cutlass_kernels(self):
|
|
735
|
+
return (
|
|
736
|
+
self.moe_quant_config is not None
|
|
737
|
+
and self.moe_quant_config.quant_dtype == "nvfp4"
|
|
738
|
+
and self.moe_config_use_flashinfer_cutlass_kernels
|
|
739
|
+
)
|
|
740
|
+
|
|
741
|
+
@property
|
|
742
|
+
def use_marlin_kernels(self):
|
|
743
|
+
return getattr(self.quant_method, "use_marlin", False)
|
|
744
|
+
|
|
745
|
+
@property
|
|
746
|
+
def use_dp_chunking(self) -> bool:
|
|
747
|
+
return (
|
|
748
|
+
self.moe_parallel_config.use_pplx_kernels
|
|
749
|
+
or self.moe_parallel_config.use_deepep_ll_kernels
|
|
750
|
+
or (self.dp_size > 1 and self.use_flashinfer_cutlass_kernels)
|
|
751
|
+
) and envs.VLLM_ENABLE_MOE_DP_CHUNK
|
|
752
|
+
|
|
753
|
+
@property
|
|
754
|
+
def is_internal_router(self) -> bool:
|
|
755
|
+
# By default, router/gate is called before FusedMoE forward pass
|
|
756
|
+
return False
|
|
757
|
+
|
|
758
|
+
def _maybe_init_expert_routing_tables(
|
|
759
|
+
self,
|
|
760
|
+
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None:
|
|
761
|
+
# Currently routing_tables only needed for round-robin expert placement
|
|
762
|
+
# with DeepEP-ll all2all backend.
|
|
763
|
+
if (
|
|
764
|
+
self.expert_placement_strategy != "round_robin"
|
|
765
|
+
or not self.use_deepep_ll_kernels
|
|
766
|
+
):
|
|
767
|
+
return None
|
|
768
|
+
|
|
769
|
+
if hasattr(self, "expert_global_to_physical"):
|
|
770
|
+
return cast(
|
|
771
|
+
tuple[torch.Tensor, torch.Tensor, torch.Tensor],
|
|
772
|
+
(
|
|
773
|
+
self.expert_global_to_physical,
|
|
774
|
+
self.expert_physical_to_global,
|
|
775
|
+
self.expert_local_to_global,
|
|
776
|
+
),
|
|
777
|
+
)
|
|
778
|
+
|
|
779
|
+
if self._expert_map is None:
|
|
780
|
+
return None
|
|
781
|
+
|
|
782
|
+
routing_tables = self.ensure_round_robin_expert_routing_tables(
|
|
783
|
+
global_num_experts=self.global_num_experts,
|
|
784
|
+
ep_size=self.ep_size,
|
|
785
|
+
ep_rank=self.ep_rank,
|
|
786
|
+
local_num_experts=self.local_num_experts,
|
|
787
|
+
device=self._expert_map.device,
|
|
788
|
+
)
|
|
789
|
+
|
|
790
|
+
global_to_physical, physical_to_global, local_global = routing_tables
|
|
791
|
+
self.register_buffer("expert_global_to_physical", global_to_physical)
|
|
792
|
+
self.register_buffer("expert_physical_to_global", physical_to_global)
|
|
793
|
+
self.register_buffer("expert_local_to_global", local_global)
|
|
794
|
+
|
|
795
|
+
return routing_tables
|
|
796
|
+
|
|
797
|
+
@staticmethod
|
|
798
|
+
def ensure_round_robin_expert_routing_tables(
|
|
799
|
+
global_num_experts: int,
|
|
800
|
+
ep_size: int,
|
|
801
|
+
ep_rank: int,
|
|
802
|
+
local_num_experts: int,
|
|
803
|
+
device: torch.device | None = None,
|
|
804
|
+
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
|
805
|
+
device_kwargs = {"device": device} if device is not None else {}
|
|
806
|
+
global_indices = torch.arange(
|
|
807
|
+
global_num_experts, dtype=torch.long, **device_kwargs
|
|
808
|
+
)
|
|
809
|
+
owner = torch.remainder(global_indices, ep_size)
|
|
810
|
+
local_index = torch.div(global_indices, ep_size, rounding_mode="floor")
|
|
811
|
+
base = global_num_experts // ep_size
|
|
812
|
+
remainder = global_num_experts % ep_size
|
|
813
|
+
physical_offset = owner * base
|
|
814
|
+
if remainder > 0:
|
|
815
|
+
remainder_tensor = torch.tensor(
|
|
816
|
+
remainder, dtype=torch.long, **device_kwargs
|
|
817
|
+
)
|
|
818
|
+
physical_offset = physical_offset + torch.minimum(owner, remainder_tensor)
|
|
819
|
+
|
|
820
|
+
global_to_physical = physical_offset + local_index
|
|
821
|
+
physical_to_global = torch.empty_like(global_to_physical)
|
|
822
|
+
physical_to_global[global_to_physical] = global_indices
|
|
823
|
+
|
|
824
|
+
local_global = torch.arange(
|
|
825
|
+
ep_rank,
|
|
826
|
+
global_num_experts,
|
|
827
|
+
ep_size,
|
|
828
|
+
dtype=torch.long,
|
|
829
|
+
**device_kwargs,
|
|
830
|
+
)
|
|
831
|
+
if local_global.numel() != local_num_experts:
|
|
832
|
+
local_global = local_global[:local_num_experts]
|
|
833
|
+
|
|
834
|
+
return (global_to_physical, physical_to_global, local_global)
|
|
835
|
+
|
|
836
|
+
def update_expert_map(self):
|
|
837
|
+
# ep_size and ep_rank should already be updated
|
|
838
|
+
assert self._expert_map is not None
|
|
839
|
+
with self._expert_map.device:
|
|
840
|
+
local_num_experts, expert_map, expert_mask = determine_expert_map(
|
|
841
|
+
ep_size=self.ep_size,
|
|
842
|
+
ep_rank=self.ep_rank,
|
|
843
|
+
global_num_experts=self.global_num_experts,
|
|
844
|
+
expert_placement_strategy=self.expert_placement_strategy,
|
|
845
|
+
num_fused_shared_experts=self.num_fused_shared_experts,
|
|
846
|
+
return_expert_mask=self.rocm_aiter_fmoe_enabled,
|
|
847
|
+
)
|
|
848
|
+
self.local_num_experts = local_num_experts
|
|
849
|
+
self.register_buffer("_expert_map", expert_map)
|
|
850
|
+
self.register_buffer("expert_mask", expert_mask)
|
|
851
|
+
self._maybe_init_expert_routing_tables()
|
|
852
|
+
if self.aiter_fmoe_shared_expert_enabled:
|
|
853
|
+
self._init_aiter_shared_experts_topK_buffer(
|
|
854
|
+
vllm_config=get_current_vllm_config(),
|
|
855
|
+
dp_size=get_dp_group().world_size,
|
|
856
|
+
)
|
|
857
|
+
|
|
858
|
+
def _maybe_setup_shared_experts_stream(
|
|
859
|
+
self,
|
|
860
|
+
hidden_states: torch.Tensor,
|
|
861
|
+
has_separate_shared_experts: bool,
|
|
862
|
+
use_chunked_impl: bool,
|
|
863
|
+
) -> tuple[bool, torch.Tensor | None]:
|
|
864
|
+
use_shared_experts_stream = (
|
|
865
|
+
current_platform.is_cuda()
|
|
866
|
+
and has_separate_shared_experts
|
|
867
|
+
and not use_chunked_impl
|
|
868
|
+
and self.shared_experts_stream is not None
|
|
869
|
+
and (
|
|
870
|
+
hidden_states.shape[0]
|
|
871
|
+
<= envs.VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD
|
|
872
|
+
)
|
|
873
|
+
)
|
|
874
|
+
|
|
875
|
+
hidden_states_clone: torch.Tensor | None = None
|
|
876
|
+
if use_shared_experts_stream:
|
|
877
|
+
assert self.shared_experts_stream is not None
|
|
878
|
+
|
|
879
|
+
# Clone BEFORE switching streams to avoid race condition
|
|
880
|
+
# where routed_expert kernel may mutate hidden_states.
|
|
881
|
+
hidden_states_clone = hidden_states.clone()
|
|
882
|
+
|
|
883
|
+
# Record that the clone will be used by shared_experts_stream
|
|
884
|
+
# to avoid gc issue from deallocation of hidden_states_clone
|
|
885
|
+
# For more details: https://docs.pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html # noqa: E501
|
|
886
|
+
# NOTE: We don't need shared_output.record_stream(current_stream())
|
|
887
|
+
# because we synch the streams before using shared_output.
|
|
888
|
+
hidden_states_clone.record_stream(self.shared_experts_stream)
|
|
889
|
+
|
|
890
|
+
# Mark sync start point for the separate shared experts
|
|
891
|
+
# stream here since we want to run in parallel with the
|
|
892
|
+
# router/gate (next op below)
|
|
893
|
+
assert self.shared_experts_stream is not None
|
|
894
|
+
self.shared_experts_stream.wait_stream(current_stream())
|
|
895
|
+
|
|
896
|
+
return use_shared_experts_stream, hidden_states_clone
|
|
897
|
+
|
|
898
|
+
def _load_per_tensor_weight_scale(
|
|
899
|
+
self,
|
|
900
|
+
shard_id: str,
|
|
901
|
+
param: torch.nn.Parameter,
|
|
902
|
+
loaded_weight: torch.Tensor,
|
|
903
|
+
expert_id: int,
|
|
904
|
+
):
|
|
905
|
+
param_data = param.data
|
|
906
|
+
# for per tensor weight quantization
|
|
907
|
+
if shard_id in ("w1", "w3"):
|
|
908
|
+
# We have to keep the weight scales of w1 and w3 because
|
|
909
|
+
# we need to re-quantize w1/w3 weights after weight loading.
|
|
910
|
+
idx = 0 if shard_id == "w1" else 1
|
|
911
|
+
param_data[expert_id][idx] = loaded_weight
|
|
912
|
+
# If we are in the row parallel case (down_proj)
|
|
913
|
+
elif shard_id == "w2":
|
|
914
|
+
param_data[expert_id] = loaded_weight
|
|
915
|
+
|
|
916
|
+
def _load_combined_w13_weight_scale(
|
|
917
|
+
self,
|
|
918
|
+
shard_dim: int,
|
|
919
|
+
loaded_weight: torch.Tensor,
|
|
920
|
+
param: torch.Tensor,
|
|
921
|
+
tp_rank: int,
|
|
922
|
+
):
|
|
923
|
+
"""
|
|
924
|
+
Load w13 weight scales assuming that w1 weight scales and w3 weight
|
|
925
|
+
scales are stored in the same loaded_weight tensor.
|
|
926
|
+
"""
|
|
927
|
+
shard_size = param.shape[shard_dim]
|
|
928
|
+
loaded_weight = loaded_weight.narrow(
|
|
929
|
+
shard_dim, shard_size * tp_rank, shard_size
|
|
930
|
+
)
|
|
931
|
+
param.copy_(loaded_weight)
|
|
932
|
+
|
|
933
|
+
def _load_model_weight_or_group_weight_scale(
|
|
934
|
+
self,
|
|
935
|
+
shard_dim: int,
|
|
936
|
+
expert_data: torch.Tensor,
|
|
937
|
+
shard_id: str,
|
|
938
|
+
loaded_weight: torch.Tensor,
|
|
939
|
+
tp_rank: int,
|
|
940
|
+
load_full_w2: bool = False,
|
|
941
|
+
):
|
|
942
|
+
"""
|
|
943
|
+
Load grouped weight scales for group quantization or model weights
|
|
944
|
+
:param shard_dim: dimension to shard
|
|
945
|
+
:param expert_data: parameter for a particular expert
|
|
946
|
+
:param shard_id: either w1, w2, or w3
|
|
947
|
+
:param loaded_weight: checkpoint weight to load into the param
|
|
948
|
+
:param tp_rank: tensor parallel rank
|
|
949
|
+
:param load_full_w2: whether or not the w2 loaded should be sharded.
|
|
950
|
+
"""
|
|
951
|
+
if shard_id == "w2":
|
|
952
|
+
# In the case where we have actorder/g_idx, we do not partition the
|
|
953
|
+
# w2 scales, as indicated by `load_full` argument, for all tp cases
|
|
954
|
+
self._load_w2(
|
|
955
|
+
shard_dim=shard_dim,
|
|
956
|
+
loaded_weight=loaded_weight,
|
|
957
|
+
expert_data=expert_data,
|
|
958
|
+
tp_rank=tp_rank,
|
|
959
|
+
load_full=load_full_w2,
|
|
960
|
+
)
|
|
961
|
+
elif shard_id in ("w1", "w3"):
|
|
962
|
+
self._load_w13(
|
|
963
|
+
shard_id=shard_id,
|
|
964
|
+
shard_dim=shard_dim,
|
|
965
|
+
loaded_weight=loaded_weight,
|
|
966
|
+
expert_data=expert_data,
|
|
967
|
+
tp_rank=tp_rank,
|
|
968
|
+
)
|
|
969
|
+
|
|
970
|
+
def _load_per_channel_weight_scale(
|
|
971
|
+
self,
|
|
972
|
+
expert_data: torch.Tensor,
|
|
973
|
+
shard_dim: int,
|
|
974
|
+
shard_id: str,
|
|
975
|
+
loaded_weight: torch.Tensor,
|
|
976
|
+
tp_rank: int,
|
|
977
|
+
):
|
|
978
|
+
# for per channel weight quantization
|
|
979
|
+
if shard_id == "w2":
|
|
980
|
+
expert_data.copy_(loaded_weight)
|
|
981
|
+
elif shard_id in ("w1", "w3"):
|
|
982
|
+
self._load_w13(
|
|
983
|
+
shard_id=shard_id,
|
|
984
|
+
shard_dim=shard_dim,
|
|
985
|
+
loaded_weight=loaded_weight,
|
|
986
|
+
expert_data=expert_data,
|
|
987
|
+
tp_rank=tp_rank,
|
|
988
|
+
)
|
|
989
|
+
|
|
990
|
+
def _load_w13(
|
|
991
|
+
self,
|
|
992
|
+
expert_data: torch.Tensor,
|
|
993
|
+
shard_dim: int,
|
|
994
|
+
shard_id: str,
|
|
995
|
+
loaded_weight: torch.Tensor,
|
|
996
|
+
tp_rank: int,
|
|
997
|
+
load_full: bool = False,
|
|
998
|
+
):
|
|
999
|
+
# Index the loaded weight for tp sharding.
|
|
1000
|
+
# gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim
|
|
1001
|
+
if self.moe_config.is_act_and_mul:
|
|
1002
|
+
shard_size = expert_data.shape[shard_dim] // 2
|
|
1003
|
+
else:
|
|
1004
|
+
shard_size = expert_data.shape[shard_dim]
|
|
1005
|
+
if not load_full:
|
|
1006
|
+
loaded_weight = loaded_weight.narrow(
|
|
1007
|
+
shard_dim, shard_size * tp_rank, shard_size
|
|
1008
|
+
)
|
|
1009
|
+
# Narrow parameter and load.
|
|
1010
|
+
# w1, gate_proj: Load into first logical weight of w13.
|
|
1011
|
+
if shard_id == "w1":
|
|
1012
|
+
expert_data = expert_data.narrow(shard_dim, 0, shard_size)
|
|
1013
|
+
# w3, up_proj: Load into second logical weight of w13.
|
|
1014
|
+
else:
|
|
1015
|
+
assert shard_id == "w3"
|
|
1016
|
+
expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
|
|
1017
|
+
expert_data.copy_(loaded_weight)
|
|
1018
|
+
|
|
1019
|
+
def _load_w2(
|
|
1020
|
+
self,
|
|
1021
|
+
expert_data: torch.Tensor,
|
|
1022
|
+
shard_dim: int,
|
|
1023
|
+
loaded_weight: torch.Tensor,
|
|
1024
|
+
tp_rank: int,
|
|
1025
|
+
load_full: bool = False,
|
|
1026
|
+
):
|
|
1027
|
+
# Index the loaded weight for tp sharding.
|
|
1028
|
+
# down_proj: "RowParallel" so tp sharding on input_dim
|
|
1029
|
+
# Narrow parameter and load.
|
|
1030
|
+
shard_size = expert_data.shape[shard_dim]
|
|
1031
|
+
if not load_full:
|
|
1032
|
+
loaded_weight = loaded_weight.narrow(
|
|
1033
|
+
shard_dim, shard_size * tp_rank, shard_size
|
|
1034
|
+
)
|
|
1035
|
+
# w2, down_proj: Load into only logical weight of w2.
|
|
1036
|
+
expert_data.copy_(loaded_weight)
|
|
1037
|
+
|
|
1038
|
+
def _load_single_value(
|
|
1039
|
+
self, param: torch.nn.Parameter, loaded_weight: torch.Tensor, expert_id: int
|
|
1040
|
+
):
|
|
1041
|
+
param_data = param.data
|
|
1042
|
+
|
|
1043
|
+
# Input scales can be loaded directly and should be equal.
|
|
1044
|
+
param_data[expert_id] = loaded_weight
|
|
1045
|
+
|
|
1046
|
+
def _load_g_idx(
|
|
1047
|
+
self,
|
|
1048
|
+
shard_id: str,
|
|
1049
|
+
expert_data: torch.Tensor,
|
|
1050
|
+
shard_dim: int,
|
|
1051
|
+
loaded_weight: torch.Tensor,
|
|
1052
|
+
tp_rank: int,
|
|
1053
|
+
):
|
|
1054
|
+
if shard_id == "w2":
|
|
1055
|
+
self._load_w2(
|
|
1056
|
+
shard_dim=shard_dim,
|
|
1057
|
+
loaded_weight=loaded_weight,
|
|
1058
|
+
expert_data=expert_data,
|
|
1059
|
+
tp_rank=tp_rank,
|
|
1060
|
+
)
|
|
1061
|
+
else:
|
|
1062
|
+
assert shard_id in ("w1", "w3")
|
|
1063
|
+
expert_data.copy_(loaded_weight)
|
|
1064
|
+
|
|
1065
|
+
def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int:
|
|
1066
|
+
if self._expert_map is None:
|
|
1067
|
+
return expert_id
|
|
1068
|
+
return self._expert_map[expert_id].item()
|
|
1069
|
+
|
|
1070
|
+
def _init_aiter_shared_experts_topK_buffer(
|
|
1071
|
+
self, vllm_config: VllmConfig, dp_size: int
|
|
1072
|
+
):
|
|
1073
|
+
if self.num_fused_shared_experts > 0:
|
|
1074
|
+
init_aiter_topK_meta_data(
|
|
1075
|
+
n_routed_experts=self.global_num_experts,
|
|
1076
|
+
n_shared_experts=self.num_fused_shared_experts,
|
|
1077
|
+
top_k=self.top_k,
|
|
1078
|
+
tp_rank=self.ep_rank if self.use_ep else self.tp_rank,
|
|
1079
|
+
tp_size=self.ep_size if self.use_ep else self.tp_size,
|
|
1080
|
+
shared_experts_score=1.0,
|
|
1081
|
+
max_num_tokens=vllm_config.scheduler_config.max_num_batched_tokens
|
|
1082
|
+
* dp_size,
|
|
1083
|
+
is_EP=self.use_ep,
|
|
1084
|
+
)
|
|
1085
|
+
self.local_num_experts += self.num_fused_shared_experts
|
|
1086
|
+
|
|
1087
|
+
@overload
|
|
1088
|
+
def weight_loader(
|
|
1089
|
+
self,
|
|
1090
|
+
param: torch.nn.Parameter,
|
|
1091
|
+
loaded_weight: torch.Tensor,
|
|
1092
|
+
weight_name: str,
|
|
1093
|
+
shard_id: str,
|
|
1094
|
+
expert_id: int,
|
|
1095
|
+
return_success: Literal[False],
|
|
1096
|
+
) -> None: ...
|
|
1097
|
+
|
|
1098
|
+
@overload
|
|
1099
|
+
def weight_loader(
|
|
1100
|
+
self,
|
|
1101
|
+
param: torch.nn.Parameter,
|
|
1102
|
+
loaded_weight: torch.Tensor,
|
|
1103
|
+
weight_name: str,
|
|
1104
|
+
shard_id: str,
|
|
1105
|
+
expert_id: int,
|
|
1106
|
+
return_success: Literal[True],
|
|
1107
|
+
) -> bool: ...
|
|
1108
|
+
|
|
1109
|
+
def weight_loader(
|
|
1110
|
+
self,
|
|
1111
|
+
param: torch.nn.Parameter,
|
|
1112
|
+
loaded_weight: torch.Tensor,
|
|
1113
|
+
weight_name: str,
|
|
1114
|
+
shard_id: str,
|
|
1115
|
+
expert_id: int,
|
|
1116
|
+
return_success: bool = False,
|
|
1117
|
+
) -> bool | None:
        if self.quant_config and self.quant_config.get_name() == "mxfp4":
            # (FIXME) for gpt-oss all experts are combined
            if "bias" in weight_name:
                dim1 = loaded_weight.shape[1]
                param.data[:, :dim1].copy_(loaded_weight)
            else:
                dim1 = loaded_weight.shape[1]
                dim2 = loaded_weight.shape[2]
                param.data[:, :dim1, :dim2].copy_(loaded_weight)
            return True if return_success else None

        quant_method_name = self.quant_method.__class__.__name__
        global_expert_id = expert_id
        expert_id = self._map_global_expert_id_to_local_expert_id(global_expert_id)

        allow_flashinfer = getattr(self.quant_method, "allow_flashinfer", False)
        moe_backend = getattr(self.quant_method, "flashinfer_moe_backend", None)

        use_global_sf = (
            allow_flashinfer
            and is_flashinfer_supporting_global_sf(moe_backend)
            and "input_scale" in weight_name
            and quant_method_name == "ModelOptNvFp4FusedMoE"
        )

        if expert_id == -1 and not use_global_sf:
            # Failed to load this param since it's not local to this rank
            return False if return_success else None
        # Hereafter, `expert_id` is local physical id

        # compressed-tensors checkpoints with packed weights are stored flipped
        # TODO (mgoin): check self.quant_method.quant_config.quant_format
        # against known CompressionFormat enum values that have this quality
        if self.quant_method.__class__.__name__ in (
            "CompressedTensorsWNA16MarlinMoEMethod",
            "CompressedTensorsWNA16MoEMethod",
        ):
            loaded_weight = loaded_weight.t().contiguous()

        if shard_id not in ("w1", "w2", "w3"):
            raise ValueError(f"shard_id must be ['w1','w2','w3'] but got {shard_id}.")

        # Fetch the dim to shard the parameter/loaded weight
        # based on the shard id. This will be whatever
        # dimension intermediate_size_per_partition is used.
        SHARD_ID_TO_SHARDED_DIM = {"w1": 0, "w2": 1, "w3": 0}

        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()
            param.data.copy_(loaded_weight)
            return True if return_success else None

        # Case for BitsAndBytes
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
        if use_bitsandbytes_4bit:
            shard_dim = 0

            expert_data = param.data[expert_id]
            if shard_id == "w2":
                expert_data.copy_(loaded_weight)
            elif shard_id in ("w1", "w3"):
                # BNB inflight quantization has already sharded the weights
                full_load = True
                self._load_w13(
                    shard_id=shard_id,
                    shard_dim=shard_dim,
                    loaded_weight=loaded_weight,
                    expert_data=expert_data,
                    tp_rank=self.tp_rank,
                    load_full=full_load,
                )
            return True if return_success else None

        # is_transposed: if the dim to shard the weight
        # should be flipped. Required by GPTQ, compressed-tensors
        # should be whatever dimension intermediate_size_per_partition is
        is_transposed = getattr(param, "is_transposed", False)
        shard_dim = SHARD_ID_TO_SHARDED_DIM[shard_id]
        if is_transposed:
            shard_dim = int(not shard_dim)

        full_load = len(loaded_weight.shape) == 3
        if full_load:
            shard_dim += 1

        # Materialize GGUF UninitializedParameter accounting merged weights
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            # To materialize a tensor, we must have the full shape including
            # the number of experts, so this path requires `full_load`.
            assert full_load
            final_shape = list(loaded_weight.shape)
            # w1 and w3 are merged per expert.
            if shard_id in {"w1", "w3"}:
                final_shape[1] *= 2
            final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size
            param.materialize(final_shape, dtype=loaded_weight.dtype)

        expert_data = param.data if full_load else param.data[expert_id]

        # Case input scale: input_scale loading is only supported for fp8
        if "input_scale" in weight_name:
            # this is needed for compressed-tensors only
            loaded_weight = loaded_weight.to(param.data.device)

            if (
                "compressed" in quant_method_name.lower()
                and param.data[expert_id] != 1
                and (param.data[expert_id] - loaded_weight).abs() > 1e-5
            ):
                raise ValueError(
                    "input_scales of w1 and w3 of a layer "
                    f"must be equal. But got {param.data[expert_id]} "
                    f"vs. {loaded_weight}"
                )

            self._load_single_value(
                param=param,
                loaded_weight=loaded_weight,
                expert_id=global_expert_id if use_global_sf else expert_id,
            )
            return True if return_success else None

        # Case g_idx
        if "g_idx" in weight_name:
            self._load_g_idx(
                shard_dim=0,
                shard_id=shard_id,
                loaded_weight=loaded_weight,
                expert_data=expert_data,
                tp_rank=self.tp_rank,
            )
            return True if return_success else None

        # TODO @dsikka: ModelOpt should follow the proper MoE loading pattern
        if "ModelOpt" in quant_method_name:
            # Determine per-tensor weight scale patterns based on variant
            # Use the dedicated method instead of brittle string matching
            uses_weight_scale_2 = self.quant_method.uses_weight_scale_2_pattern()

            # Call _load_per_tensor_weight_scale() to load per-tensor (scalar)
            # weight scales.
            # Input scales are always per-tensor.
            # Weight scales: FP4 uses "weight_scale_2" and FP8 uses
            # "weight_scale" for per-tensor scales.
            is_per_tensor = (
                "weight_scale_2" in weight_name
                if uses_weight_scale_2
                else "weight_scale" in weight_name
            ) or "input_scale" in weight_name
            if is_per_tensor:
                self._load_per_tensor_weight_scale(
                    shard_id=shard_id,
                    param=param,
                    loaded_weight=loaded_weight,
                    expert_id=expert_id,
                )
                return True if return_success else None

            # If the weight is w13_weight_scale and the w13_weight_scales are
            # combined into a single loaded_weight, call
            # _load_combined_w13_weight_scale() to load it.
            # This is checked by comparing the hidden_out dims of the
            # loaded_weight and the param.
            if "w13_weight_scale" in weight_name:
                loaded_weight_hidden_out = loaded_weight.shape[-2]
                param_hidden_out = param.data.shape[-2] * self.tp_size
                if loaded_weight_hidden_out == param_hidden_out:
                    self._load_combined_w13_weight_scale(
                        shard_dim=shard_dim,
                        loaded_weight=loaded_weight,
                        param=expert_data,
                        tp_rank=self.tp_rank,
                    )
                    return True if return_success else None

            # For other weights, call _load_model_weight_or_group_weight_scale()
            # to load it.
            if "weight" in weight_name:
                self._load_model_weight_or_group_weight_scale(
                    shard_id=shard_id,
                    shard_dim=shard_dim,
                    loaded_weight=loaded_weight,
                    expert_data=expert_data,
                    tp_rank=self.tp_rank,
                )
                return True if return_success else None

        # Case weight scales, zero_points and offset, weight/input global scales
        if "scale" in weight_name or "zero" in weight_name or "offset" in weight_name:
            # load the weight scales and zp based on the quantization scheme
            # supported weight scales/zp can be found in
            # FusedMoeWeightScaleSupported
            # TODO @dsikka: once hardened, refactor to use vLLM Parameters
            # specific to each case
            quant_method = getattr(param, "quant_method", None)
            if quant_method == FusedMoeWeightScaleSupported.CHANNEL.value:
                self._load_per_channel_weight_scale(
                    shard_id=shard_id,
                    shard_dim=shard_dim,
                    loaded_weight=loaded_weight,
                    expert_data=expert_data,
                    tp_rank=self.tp_rank,
                )
            elif quant_method in [
                FusedMoeWeightScaleSupported.GROUP.value,
                FusedMoeWeightScaleSupported.BLOCK.value,
            ]:
                self._load_model_weight_or_group_weight_scale(
                    shard_id=shard_id,
                    shard_dim=shard_dim,
                    loaded_weight=loaded_weight,
                    expert_data=expert_data,
                    tp_rank=self.tp_rank,
                    load_full_w2=getattr(param, "load_full_w2", False),
                )
            elif quant_method == FusedMoeWeightScaleSupported.TENSOR.value:
                self._load_per_tensor_weight_scale(
                    shard_id=shard_id,
                    param=param,
                    loaded_weight=loaded_weight,
                    expert_id=expert_id,
                )
            else:
                WEIGHT_SCALE_SUPPORTED = [e.value for e in FusedMoeWeightScaleSupported]
                raise ValueError(
                    f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}"
                )
            return True if return_success else None

        # Case weight_shape
        if "weight_shape" in weight_name:
            # only required by compressed-tensors
            self._load_single_value(
                param=param, loaded_weight=loaded_weight, expert_id=expert_id
            )
            return True if return_success else None

        # Case model weights
        if "weight" in weight_name:
            self._load_model_weight_or_group_weight_scale(
                shard_id=shard_id,
                shard_dim=shard_dim,
                loaded_weight=loaded_weight,
                expert_data=expert_data,
                tp_rank=self.tp_rank,
            )
            return True if return_success else None

        return False if return_success else None

    def load_weights(
        self, weights: Iterable[tuple[str, torch.Tensor]]
    ) -> Iterable[str]:
        if (expert_mapping := self.expert_mapping) is None:
            raise ValueError(
                "`self.expert_mapping` must be provided to "
                "load weights using `self.load_weights`."
            )
        for expert_name, loaded_weight in weights:
            qual_name = f"{self.layer_name}.{expert_name}"
            for param_name, weight_name, expert_id, shard_id in expert_mapping:
                if weight_name not in qual_name:
                    continue
                weight_name = qual_name.replace(weight_name, param_name)
                param_name = weight_name.removeprefix(f"{self.layer_name}.")
                param = getattr(self, param_name)
                success = self.weight_loader(
                    param=param,
                    loaded_weight=loaded_weight,
                    weight_name=weight_name,
                    shard_id=shard_id,
                    expert_id=expert_id,
                    return_success=True,
                )
                if success:
                    logger.debug(
                        "Loaded %s for expert %d into %s",
                        param_name,
                        expert_id,
                        self.layer_name,
                    )
                    yield param_name
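
    # An illustrative walk-through (hypothetical layer and checkpoint names)
    # of how one `expert_mapping` entry drives the renaming above: with
    # layer_name "model.layers.3.mlp" and the entry
    #     ("experts.w13_", "experts.0.gate_proj.", 0, "w1"),
    # a checkpoint key "model.layers.3.mlp.experts.0.gate_proj.weight"
    # becomes the local parameter name "experts.w13_weight", which is then
    # passed to `weight_loader` as expert 0, shard "w1".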

    def get_expert_weights(self) -> Iterable[torch.Tensor]:
        def _maybe_make_contiguous(
            name: str, p: torch.nn.Parameter
        ) -> torch.nn.Parameter:
            """
            In some cases, the last 2 dimensions (the non-expert dimensions)
            of the weight scale tensor are transposed. This function
            transforms the tensor (view update) so the tensor is contiguous().
            Example: A non-contiguous scale tensor,
            `x` of shape (E, 32, 16) and stride (512, 1, 32) is transformed to
            `x_` of shape (E, 16, 32) and stride (512, 32, 1).
            Note that we specifically use torch.transpose() so `x_` refers
            to the same underlying memory. Because `x` and `x_` point to the
            same underlying memory, this transformation is safe in the
            context of EPLB, i.e. it is the same memory and just the view
            is different.
            Note: This function handles the "weight_scale" tensors specifically.
            This could however be generalized to handle similar tensors.
            """
            if p.ndim != 3:
                return p
            if p.is_contiguous():
                # Already contiguous. do nothing.
                return p
            # p is non-contiguous. We only handle the case where the last 2
            # dimensions of the scales tensor are transposed. We can handle
            # other cases when they become relevant.
            is_transposed_12 = p.stride(1) == 1 and p.stride(2) != 1
            if "weight_scale" not in name or not is_transposed_12:
                # do nothing.
                return p

            # Do not update the layer parameter, as the layer's MoE operations
            # expect the parameter's tensor to keep the same shape / stride.
            # Instead, make a new torch.nn.Parameter that is used just in the
            # context of EPLB.
            return torch.nn.Parameter(
                torch.transpose(p.data, 1, 2), requires_grad=False
            )

        weights = list(self.named_parameters())
        weights = [(name, _maybe_make_contiguous(name, p)) for name, p in weights]

        assert all(
            weight.is_contiguous()
            for name, weight in weights
            if not name.startswith("_shared_experts.")
        )

        # Filter out the non-expert weights.
        # `e_score_correction_bias` is a bias for each logical expert,
        # with shape (num_logical_experts,), not an expert weight.
        NON_EXPERT_WEIGHTS = {
            "e_score_correction_bias",
        }

        return [
            weight.view(self.local_num_experts, -1)
            for name, weight in weights
            if name not in NON_EXPERT_WEIGHTS
            and weight.shape != torch.Size([])
            and not name.startswith("_shared_experts.")
            # exclude parameters from non-expert submodules (e.g. gate/shared)
            and not name.startswith("_gate.")
        ]

    def set_eplb_state(
        self,
        moe_layer_idx: int,
        expert_load_view: torch.Tensor,
        logical_to_physical_map: torch.Tensor,
        logical_replica_count: torch.Tensor,
    ) -> None:
        """
        Register the EPLB state in this layer.

        This is used later in forward pass, where we get the expert mapping
        and record the load metrics in `expert_load_view`.
        """
        self.expert_load_view = expert_load_view[moe_layer_idx]
        self.logical_to_physical_map = logical_to_physical_map[moe_layer_idx]
        self.logical_replica_count = logical_replica_count[moe_layer_idx]

    def ensure_moe_quant_config_init(self):
        if self.quant_method.moe_quant_config is None:
            # Note: the moe_quant_config can't be constructed until after
            # weight loading post processing.
            self.quant_method.moe_quant_config = (
                self.quant_method.get_fused_moe_quant_config(self)
            )

    @property
    def moe_quant_config(self) -> FusedMoEQuantConfig | None:
        self.ensure_moe_quant_config_init()
        return self.quant_method.moe_quant_config

    def ensure_dp_chunking_init(self):
        if not self.use_dp_chunking or self.batched_hidden_states is not None:
            return

        states_shape: tuple[int, ...]
        logits_shape: tuple[int, ...]

        moe = self.moe_config

        if self.vllm_config.parallel_config.enable_dbo:
            states_shape = (2, moe.max_num_tokens, self.hidden_size)
            logits_shape = (2, moe.max_num_tokens, self.logical_num_experts)
        else:
            states_shape = (moe.max_num_tokens, self.hidden_size)
            logits_shape = (moe.max_num_tokens, self.logical_num_experts)

        self.batched_hidden_states = torch.zeros(
            states_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
        )

        self.batched_router_logits = torch.zeros(
            logits_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
        )

    def select_experts(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]:
        """
        Route the input hidden states to the top-k experts based on the
        router logits.

        Returns:
            (topk_weights, topk_ids, zero_expert_result)
            (tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
            The weights, expert ids, and zero expert computation result.

        **Compatibility**: When EPLB is not enabled, the returned ids are
        equivalent to global logical ids, so should be compatible with
        plain MoE implementations without redundant experts.
        """
        from vllm.model_executor.layers.fused_moe.fused_moe import (
            fused_topk,
            fused_topk_bias,
        )

        if self.enable_eplb:
            if self.quant_method.supports_eplb:
                if self.expert_load_view is None:
                    raise ValueError(
                        "enable_eplb=True requires expert_load_view != None"
                    )
                if self.logical_to_physical_map is None:
                    raise ValueError(
                        "enable_eplb=True requires logical_to_physical_map != None"
                    )
                if self.logical_replica_count is None:
                    raise ValueError(
                        "enable_eplb=True requires logical_replica_count != None"
                    )
            else:
                raise NotImplementedError(
                    f"EPLB is not supported for {self.quant_method.method_name}."
                )

        def valid_grouping() -> bool:
            # Check if num_experts is greater than num_expert_group
            # and is divisible by num_expert_group
            num_experts = router_logits.shape[-1]
            if num_experts <= self.num_expert_group:
                return False
            return num_experts % self.num_expert_group == 0

        indices_type = self.quant_method.topk_indices_dtype

        # Check if we should use a routing simulation strategy
        routing_strategy = envs.VLLM_MOE_ROUTING_SIMULATION_STRATEGY
        if routing_strategy != "":
            topk_weights, topk_ids = RoutingSimulator.simulate_routing(
                hidden_states=hidden_states,
                router_logits=router_logits,
                strategy_name=routing_strategy,
                top_k=self.top_k,
                indices_type=indices_type,
            )

        # DeepSeekv2 uses grouped_top_k
        elif self.use_grouped_topk and valid_grouping():
            assert self.topk_group is not None
            assert self.num_expert_group is not None
            if rocm_aiter_ops.is_fused_moe_enabled():
                if not rocm_aiter_ops.is_fusion_moe_shared_experts_enabled():
                    assert self.num_fused_shared_experts == 0
                grouped_topk_impl = partial(
                    rocm_aiter_grouped_topk,
                    num_fused_shared_experts=self.num_fused_shared_experts,
                )
            else:
                grouped_topk_impl = grouped_topk

            topk_weights, topk_ids = grouped_topk_impl(
                hidden_states=hidden_states,
                gating_output=router_logits,
                topk=self.top_k,
                renormalize=self.renormalize,
                num_expert_group=self.num_expert_group,
                topk_group=self.topk_group,
                scoring_func=self.scoring_func,
                routed_scaling_factor=self.routed_scaling_factor,
                e_score_correction_bias=self.e_score_correction_bias,
            )
        elif self.e_score_correction_bias is not None:
            topk_weights, topk_ids = fused_topk_bias(
                hidden_states=hidden_states,
                gating_output=router_logits,
                e_score_correction_bias=self.e_score_correction_bias.data,
                topk=self.top_k,
                renormalize=self.renormalize,
            )
            if self.routed_scaling_factor != 1.0:
                topk_weights *= self.routed_scaling_factor
        elif self.custom_routing_function is None:
            topk_weights, topk_ids, token_expert_indices = fused_topk(
                hidden_states=hidden_states,
                gating_output=router_logits,
                topk=self.top_k,
                renormalize=self.renormalize,
                indices_type=indices_type,
            )
        else:
            topk_weights, topk_ids = self.custom_routing_function(
                hidden_states=hidden_states,
                gating_output=router_logits,
                topk=self.top_k,
                renormalize=self.renormalize,
            )

        if self.enable_eplb:
            topk_ids = eplb_map_to_physical_and_record(
                topk_ids=topk_ids,
                expert_load_view=self.expert_load_view,
                logical_to_physical_map=self.logical_to_physical_map,
                logical_replica_count=self.logical_replica_count,
            )

        if (indices_type is not None) and topk_ids.dtype != indices_type:
            topk_ids = topk_ids.to(dtype=indices_type)

        assert topk_ids.dtype == indices_type or indices_type is None

        # Compute zero expert result if needed
        if (
            self.zero_expert_num is not None
            and self.zero_expert_num > 0
            and self.zero_expert_type is not None
            and self.global_num_experts is not None
        ):
            zero_expert_result = zero_experts_compute_triton(
                expert_indices=topk_ids,
                expert_scales=topk_weights,
                num_experts=self.global_num_experts,
                zero_expert_type=self.zero_expert_type,
                hidden_states=hidden_states,
            )
        else:
            zero_expert_result = None
        return topk_weights, topk_ids, zero_expert_result
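
    # A rough, illustrative sketch (plain PyTorch, hypothetical standalone
    # code rather than the fused kernel) of what the default `fused_topk`
    # routing path above computes when no grouping, bias, or custom routing
    # function is in play:
    #
    #     scores = torch.softmax(router_logits, dim=-1)
    #     topk_weights, topk_ids = torch.topk(scores, k=top_k, dim=-1)
    #     if renormalize:
    #         topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    #
    # The real implementation fuses these steps into a single kernel and can
    # also emit `topk_ids` in a backend-specific integer dtype.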

    def must_reduce_shared_expert_outputs(self) -> bool:
        """
        The shared_experts are typically computed using the RowParallelLinear
        layer. The result of this function is typically used as
        the reduce_results argument to the module.
        When just tensor-parallel is used, it is not required to reduce
        the shared_experts results immediately. Instead we reduce once
        at the end of the MoE op. (Refer to DeepSeekV2MoE module)
        With EP and all2all kernels - this is no longer viable as all
        GPU ranks in DP produce the complete set of hidden_states.
        Therefore it is required that we reduce the shared_experts output
        early.
        """
        assert self.quant_method is not None
        return (
            isinstance(self.quant_method, FusedMoEModularMethod)
            and self.quant_method.fused_experts.output_is_reduced()
        )

    def maybe_all_reduce_tensor_model_parallel(self, final_hidden_states: torch.Tensor):
        """
        Some combine kernels reduce across GPU ranks by default.
        """
        if self.must_reduce_shared_expert_outputs():
            return final_hidden_states
        else:
            return tensor_model_parallel_all_reduce(final_hidden_states)

    def forward_native(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        og_hidden_states = hidden_states.shape[-1]
        if self.hidden_size != og_hidden_states:
            hidden_states = F.pad(
                hidden_states,
                (0, self.hidden_size - og_hidden_states),
                mode="constant",
                value=0.0,
            )

        def reduce_output(states: torch.Tensor) -> torch.Tensor:
            if (
                not self.is_sequence_parallel
                and not self.use_dp_chunking
                and self.reduce_results
                and (self.tp_size > 1 or self.ep_size > 1)
            ):
                states = self.maybe_all_reduce_tensor_model_parallel(states)
            return states

        if self.shared_experts is None:
            if current_platform.is_tpu():
                # TODO: Once the OOM issue for the TPU backend is resolved, we
                # will switch to using the moe_forward custom op.
                fused_output = self.forward_impl(hidden_states, router_logits)
                assert not isinstance(fused_output, tuple)
            else:
                fused_output = torch.ops.vllm.moe_forward(
                    hidden_states, router_logits, self.layer_name
                )
            if self.zero_expert_num is not None and self.zero_expert_num > 0:
                assert isinstance(fused_output, tuple)
                fused_output, zero_expert_result = fused_output
                return (reduce_output(fused_output) + zero_expert_result)[
                    ..., :og_hidden_states
                ]
            else:
                return reduce_output(fused_output)[..., :og_hidden_states]
        else:
            if current_platform.is_tpu():
                # TODO: Once the OOM issue for the TPU backend is resolved, we
                # will switch to using the moe_forward custom op.
                shared_output, fused_output = self.forward_impl(
                    hidden_states, router_logits
                )
            else:
                shared_output, fused_output = torch.ops.vllm.moe_forward_shared(
                    hidden_states, router_logits, self.layer_name
                )
            return (
                reduce_output(shared_output)[..., :og_hidden_states],
                reduce_output(fused_output)[..., :og_hidden_states],
            )

    @property
    def expert_map(self) -> torch.Tensor | None:
        return (
            self._expert_map if not self.rocm_aiter_fmoe_enabled else self.expert_mask
        )

    def forward_cuda(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        return self.forward_native(hidden_states, router_logits)

    def forward_impl_chunked(
        self,
        full_hidden_states: torch.Tensor,
        full_router_logits: torch.Tensor,
        has_separate_shared_experts: bool,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        assert self.batched_hidden_states is not None
        assert self.batched_router_logits is not None
        assert self.batched_hidden_states.dtype == full_hidden_states.dtype
        assert self.batched_router_logits.dtype == full_router_logits.dtype
        # Check size compatibility.
        assert self.batched_hidden_states.size(-1) == full_hidden_states.size(-1)
        assert self.batched_router_logits.size(-1) == full_router_logits.size(-1)

        full_fused_final_hidden_states = torch.empty_like(full_hidden_states)
        if self.shared_experts is not None:
            full_shared_final_hidden_states = torch.empty_like(full_hidden_states)

        def process_chunk(chunk_start, chunk_end, skip_result_store=False):
            chunk_size = chunk_end - chunk_start
            hidden_states = full_hidden_states[chunk_start:chunk_end, :]
            router_logits = full_router_logits[chunk_start:chunk_end, :]

            assert self.batched_hidden_states is not None
            assert self.batched_router_logits is not None
            # This is only true when DBO has been enabled in the config.
            # Both tensors will have an outer dimension for the ubatch id
            if self.batched_hidden_states.dim() == 3:
                assert self.batched_router_logits.dim() == 3
                batch_buffer_idx = dbo_current_ubatch_id()
                batched_hidden_states = self.batched_hidden_states[batch_buffer_idx, :]
                batched_router_logits = self.batched_router_logits[batch_buffer_idx, :]
            else:
                batched_hidden_states = self.batched_hidden_states
                batched_router_logits = self.batched_router_logits

            assert (
                batched_hidden_states.size(0)  # type: ignore
                >= chunk_size
            )
            assert (
                batched_router_logits.size(0)  # type: ignore
                >= chunk_size
            )
            staged_hidden_states = batched_hidden_states[:chunk_size, :]  # type: ignore
            staged_router_logits = batched_router_logits[:chunk_size, :]  # type: ignore
            staged_hidden_states.copy_(hidden_states, non_blocking=True)
            staged_router_logits.copy_(router_logits, non_blocking=True)

            # Matrix multiply.
            final_hidden_states = self.quant_method.apply(
                layer=self,
                x=staged_hidden_states,
                router_logits=staged_router_logits,
            )

            if has_separate_shared_experts:
                assert not isinstance(final_hidden_states, tuple)
                assert self.shared_experts is not None

                shared_output = self.shared_experts(staged_hidden_states)

                final_hidden_states = (
                    shared_output,
                    final_hidden_states,
                )

            if self.zero_expert_num is not None and self.zero_expert_num > 0:
                assert isinstance(final_hidden_states, tuple)
                assert self.shared_experts is None
                final_hidden_states, zero_expert_result = final_hidden_states
                if zero_expert_result is not None:
                    final_hidden_states += zero_expert_result

            if not skip_result_store:
                if self.shared_experts is None:
                    full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
                        final_hidden_states, non_blocking=True
                    )
                else:
                    full_shared_final_hidden_states[chunk_start:chunk_end, :].copy_(
                        final_hidden_states[0], non_blocking=True
                    )
                    full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
                        final_hidden_states[1], non_blocking=True
                    )

        ctx = get_forward_context()
        # flashinfer_cutlass_kernels can handle: optional DP + TP/EP
        max_tokens_across_dispatchers = ctx.dp_metadata.max_tokens_across_dp_cpu
        moe_dp_chunk_size_per_rank = self.moe_config.max_num_tokens

        # If the input to the MoE is sequence parallel then divide by sp_size
        # to find the maximum number of tokens for any individual dispatcher.
        if self.is_sequence_parallel:
            max_tokens_across_dispatchers = cdiv(
                max_tokens_across_dispatchers, self.sp_size
            )

        num_tokens = full_hidden_states.size(0)
        for chunk_idx, chunk_start_ in enumerate(
            range(0, max_tokens_across_dispatchers, moe_dp_chunk_size_per_rank)
        ):
            chunk_start = chunk_start_
            chunk_end = min(
                chunk_start + moe_dp_chunk_size_per_rank, max_tokens_across_dispatchers
            )
            # clamp start and end
            chunk_start = min(chunk_start, num_tokens - 1)
            chunk_end = min(chunk_end, num_tokens)
            with ctx.dp_metadata.chunked_sizes(
                self.sp_size, moe_dp_chunk_size_per_rank, chunk_idx
            ):
                process_chunk(
                    chunk_start, chunk_end, skip_result_store=chunk_start_ >= num_tokens
                )

        if self.shared_experts is None:
            return full_fused_final_hidden_states
        else:
            return (full_shared_final_hidden_states, full_fused_final_hidden_states)

    def forward_impl(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        assert self.quant_method is not None

        self.ensure_moe_quant_config_init()
        self.ensure_dp_chunking_init()

        has_separate_shared_experts = (
            not isinstance(self.quant_method, FusedMoEModularMethod)
            and self.shared_experts is not None
        )

        use_chunked_impl = self.use_dp_chunking

        use_shared_experts_stream, hidden_states_clone = (
            self._maybe_setup_shared_experts_stream(
                hidden_states, has_separate_shared_experts, use_chunked_impl
            )
        )

        # If router/gate provided, then apply it here.
        # (Note: This code runs only when "overlapped mode" is on to allow
        # parallel execution of shared experts with the FusedMoE via
        # separate cuda stream)
        if self.gate is not None:
            router_logits, _ = self.gate(hidden_states)

        if use_chunked_impl:
            return self.forward_impl_chunked(
                hidden_states, router_logits, has_separate_shared_experts
            )

        do_naive_dispatch_combine: bool = self.dp_size > 1 and not isinstance(
            self.quant_method, FusedMoEModularMethod
        )

        ctx = get_forward_context()
        sp_ctx = (
            ctx.dp_metadata.sp_local_sizes(self.sp_size)
            if ctx.dp_metadata
            else nullcontext()
        )

        with sp_ctx:
            if do_naive_dispatch_combine:
                hidden_states_combined, router_logits = get_ep_group().dispatch(
                    hidden_states, router_logits, self.is_sequence_parallel
                )
            # Run shared experts before the matrix multiply,
            # because the matrix multiply may modify the hidden_states.
            if has_separate_shared_experts and not use_shared_experts_stream:
                assert self.shared_experts is not None
                shared_output = self.shared_experts(hidden_states)

            # NOTE: Similar to DP, PCP also needs dispatch and combine. For
            # simplicity, AgRsAll2All was added separately for PCP here. Maybe
            # we should modify the All2AllManager abstract to better support PCP.
            if self.pcp_size > 1:
                hidden_states = get_pcp_group().all_gather(
                    hidden_states,
                    dim=0,
                )
                router_logits = get_pcp_group().all_gather(
                    router_logits,
                    dim=0,
                )

            # Matrix multiply.
            final_hidden_states = self.quant_method.apply(
                layer=self,
                x=hidden_states_combined
                if do_naive_dispatch_combine
                else hidden_states,
                router_logits=router_logits,
            )

            if has_separate_shared_experts:
                assert self.shared_experts is not None

                if use_shared_experts_stream:
                    # Run shared experts in parallel on a separate stream
                    # NOTE: We start the separate stream here and mark the
                    # sync end point immediately after it is done. This is
                    # important to avoid excessive stream allocations by the cuda
                    # graph replay later.
                    with torch.cuda.stream(self.shared_experts_stream):
                        # Note that the hidden_states clone() is necessary here
                        # to avoid a conflict with the main stream
                        shared_output = self.shared_experts(hidden_states_clone)
                    current_stream().wait_stream(self.shared_experts_stream)

                final_hidden_states = (
                    shared_output,
                    final_hidden_states,
                )
            elif self.zero_expert_num is not None and self.zero_expert_num > 0:
                assert isinstance(final_hidden_states, tuple)
                final_hidden_states, zero_expert_result = final_hidden_states

            def combine_output(states: torch.Tensor) -> torch.Tensor:
                if do_naive_dispatch_combine:
                    states = get_ep_group().combine(states, self.is_sequence_parallel)

                if self.pcp_size > 1:
                    states = get_pcp_group().reduce_scatter(
                        states,
                        dim=0,
                    )

                return states

            if self.shared_experts is not None:
                return (
                    final_hidden_states[0],
                    combine_output(final_hidden_states[1]),
                )
            elif self.zero_expert_num is not None and self.zero_expert_num > 0:
                assert isinstance(final_hidden_states, torch.Tensor)
                return (combine_output(final_hidden_states), zero_expert_result)
            else:
                return combine_output(final_hidden_states)

    @classmethod
    def make_expert_params_mapping(
        cls,
        ckpt_gate_proj_name: str,
        ckpt_down_proj_name: str,
        ckpt_up_proj_name: str,
        num_experts: int,
        num_redundant_experts: int = 0,
    ) -> list[tuple[str, str, int, str]]:
        num_physical_experts = num_experts + num_redundant_experts

        # In the returned mapping:
        # - `expert_id` is the physical expert id
        # - `weight_name` contains the weight name of the logical expert
        # So that we should map the expert id to logical in `weight_name`
        physical_to_logical_map = (
            EplbState.build_initial_global_physical_to_logical_map(
                num_experts, num_redundant_experts
            )
        )

        return [
            # (param_name, weight_name, expert_id, shard_id)
            (
                "experts.w13_"
                if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name]
                else "experts.w2_",
                f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.",
                expert_id,
                shard_id,
            )
            for expert_id in range(num_physical_experts)
            for shard_id, weight_name in [
                ("w1", ckpt_gate_proj_name),
                ("w2", ckpt_down_proj_name),
                ("w3", ckpt_up_proj_name),
            ]
        ]
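
    # For example (hypothetical checkpoint names), calling this with
    #     ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj",
    #     ckpt_up_proj_name="up_proj", num_experts=2, num_redundant_experts=0
    # would yield entries such as:
    #     ("experts.w13_", "experts.0.gate_proj.", 0, "w1")
    #     ("experts.w2_",  "experts.0.down_proj.", 0, "w2")
    #     ("experts.w13_", "experts.0.up_proj.",   0, "w3")
    # and likewise for expert 1, since with no redundant experts the
    # physical-to-logical map is the identity.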

    def extra_repr(self) -> str:
        s = (
            f"global_num_experts={self.global_num_experts}, "
            f"local_num_experts={self.local_num_experts}, "
            f"top_k={self.top_k}, "
            f"intermediate_size_per_partition={self.intermediate_size_per_partition}, "  # noqa: E501
            f"tp_size={self.tp_size},\n"
            f"ep_size={self.ep_size}, "
            f"reduce_results={self.reduce_results}, "
            f"renormalize={self.renormalize}, "
            f"use_grouped_topk={self.use_grouped_topk}"
        )

        if self.use_grouped_topk:
            s += f", num_expert_group={self.num_expert_group}, topk_group={self.topk_group}"  # noqa: E501

        s += f", scoring_func='{self.scoring_func}', activation='{self.activation}'"  # noqa: E501

        return s


def moe_forward(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    layer_name: str,
) -> torch.Tensor:
    forward_context: ForwardContext = get_forward_context()
    self = forward_context.no_compile_layers[layer_name]
    assert self.shared_experts is None
    return self.forward_impl(hidden_states, router_logits)


def moe_forward_fake(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    layer_name: str,
) -> torch.Tensor:
    return torch.empty_like(hidden_states)


direct_register_custom_op(
    op_name="moe_forward",
    op_func=moe_forward,
    mutates_args=["hidden_states"],
    fake_impl=moe_forward_fake,
    tags=(torch.Tag.needs_fixed_stride_order,),
)


def moe_forward_shared(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    layer_name: str,
) -> tuple[torch.Tensor, torch.Tensor]:
    forward_context: ForwardContext = get_forward_context()
    self = forward_context.no_compile_layers[layer_name]
    assert self.shared_experts is not None
    return self.forward_impl(hidden_states, router_logits)


def moe_forward_shared_fake(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    layer_name: str,
) -> tuple[torch.Tensor, torch.Tensor]:
    shared_out = torch.empty_like(hidden_states)
    fused_out = torch.empty_like(hidden_states)
    return shared_out, fused_out


direct_register_custom_op(
    op_name="moe_forward_shared",
    op_func=moe_forward_shared,
    mutates_args=["hidden_states"],
    fake_impl=moe_forward_shared_fake,
    tags=(torch.Tag.needs_fixed_stride_order,),
)

# Mark the FusedMoE weight_loader as supporting MoE-specific parameters
# to avoid expensive runtime reflection in model loading code
FusedMoE.weight_loader.supports_moe_loading = True  # type: ignore[attr-defined]